[S390] qdio: call qdio_free also if qdio_shutdown fails
1/*
2 * linux/drivers/s390/cio/qdio_main.c
3 *
4 * Linux for s390 qdio support, buffer handling, qdio API and module support.
5 *
6 * Copyright 2000,2008 IBM Corp.
7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
8 * Jan Glauber <jang@linux.vnet.ibm.com>
9 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
10 */
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <linux/timer.h>
15#include <linux/delay.h>
16#include <asm/atomic.h>
17#include <asm/debug.h>
18#include <asm/qdio.h>
19
20#include "cio.h"
21#include "css.h"
22#include "device.h"
23#include "qdio.h"
24#include "qdio_debug.h"
25#include "qdio_perf.h"
26
27MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
28 "Jan Glauber <jang@linux.vnet.ibm.com>");
29MODULE_DESCRIPTION("QDIO base support");
30MODULE_LICENSE("GPL");
31
32static inline int do_siga_sync(struct subchannel_id schid,
33 unsigned int out_mask, unsigned int in_mask)
34{
35 register unsigned long __fc asm ("0") = 2;
36 register struct subchannel_id __schid asm ("1") = schid;
37 register unsigned long out asm ("2") = out_mask;
38 register unsigned long in asm ("3") = in_mask;
39 int cc;
40
41 asm volatile(
42 " siga 0\n"
43 " ipm %0\n"
44 " srl %0,28\n"
45 : "=d" (cc)
46 : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
47 return cc;
48}
49
50static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
51{
52 register unsigned long __fc asm ("0") = 1;
53 register struct subchannel_id __schid asm ("1") = schid;
54 register unsigned long __mask asm ("2") = mask;
55 int cc;
56
57 asm volatile(
58 " siga 0\n"
59 " ipm %0\n"
60 " srl %0,28\n"
61 : "=d" (cc)
62 : "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
63 return cc;
64}
65
66/**
67 * do_siga_output - perform SIGA-w/wt function
68 * @schid: subchannel id or in case of QEBSM the subchannel token
69 * @mask: which output queues to process
70 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
71 * @fc: function code to perform
72 *
73 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
74 * Note: For IQDC unicast queues only the highest priority queue is processed.
75 */
76static inline int do_siga_output(unsigned long schid, unsigned long mask,
77 unsigned int *bb, unsigned int fc)
78{
79 register unsigned long __fc asm("0") = fc;
80 register unsigned long __schid asm("1") = schid;
81 register unsigned long __mask asm("2") = mask;
82 int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;
83
84 asm volatile(
85 " siga 0\n"
86 "0: ipm %0\n"
87 " srl %0,28\n"
88 "1:\n"
89 EX_TABLE(0b, 1b)
90 : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
91 : : "cc", "memory");
92 *bb = ((unsigned int) __fc) >> 31;
93 return cc;
94}
95
96static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
97{
98 /* all done or next buffer state different */
99 if (ccq == 0 || ccq == 32)
100 return 0;
101 /* not all buffers processed */
102 if (ccq == 96 || ccq == 97)
103 return 1;
104 /* notify devices immediately */
105 DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
106 return -EIO;
107}
108
109/**
110 * qdio_do_eqbs - extract buffer states for QEBSM
111 * @q: queue to manipulate
112 * @state: state of the extracted buffers
113 * @start: buffer number to start at
114 * @count: count of buffers to examine
115 * @auto_ack: automatically acknowledge buffers
116 *
117 * Returns the number of successfully extracted equal buffer states.
118 * Stops processing if a state is different from the last buffer's state.
119 */
120static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
121 int start, int count, int auto_ack)
122{
123 unsigned int ccq = 0;
124 int tmp_count = count, tmp_start = start;
125 int nr = q->nr;
126 int rc;
127
128 BUG_ON(!q->irq_ptr->sch_token);
129 qdio_perf_stat_inc(&perf_stats.debug_eqbs_all);
130
131 if (!q->is_input_q)
132 nr += q->irq_ptr->nr_input_qs;
133again:
134 ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
135 auto_ack);
136 rc = qdio_check_ccq(q, ccq);
137
138 /* At least one buffer was processed, return and extract the remaining
139 * buffers later.
140 */
141 if ((ccq == 96) && (count != tmp_count)) {
142 qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete);
143 return (count - tmp_count);
144 }
145
146 if (rc == 1) {
147 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
148 goto again;
149 }
150
151 if (rc < 0) {
152 DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
153 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
154 q->handler(q->irq_ptr->cdev,
155 QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
156 0, -1, -1, q->irq_ptr->int_parm);
157 return 0;
158 }
159 return count - tmp_count;
160}
161
162/**
163 * qdio_do_sqbs - set buffer states for QEBSM
164 * @q: queue to manipulate
165 * @state: new state of the buffers
166 * @start: first buffer number to change
167 * @count: how many buffers to change
168 *
169 * Returns the number of successfully changed buffers.
170 * Does retrying until the specified count of buffer states is set or an
171 * error occurs.
172 */
173static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
174 int count)
175{
176 unsigned int ccq = 0;
177 int tmp_count = count, tmp_start = start;
178 int nr = q->nr;
179 int rc;
180
181 if (!count)
182 return 0;
183
184 BUG_ON(!q->irq_ptr->sch_token);
185 qdio_perf_stat_inc(&perf_stats.debug_sqbs_all);
186
187 if (!q->is_input_q)
188 nr += q->irq_ptr->nr_input_qs;
189again:
190 ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
191 rc = qdio_check_ccq(q, ccq);
192 if (rc == 1) {
193 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
194 qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete);
195 goto again;
196 }
197 if (rc < 0) {
198 DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
199 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
200 q->handler(q->irq_ptr->cdev,
201 QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
202 0, -1, -1, q->irq_ptr->int_parm);
203 return 0;
204 }
205 WARN_ON(tmp_count);
206 return count - tmp_count;
207}
208
209/* returns number of examined buffers and their common state in *state */
210static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
211 unsigned char *state, unsigned int count,
212 int auto_ack)
213{
214 unsigned char __state = 0;
215 int i;
216
217 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
218 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
219
220 if (is_qebsm(q))
221 return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
222
223 for (i = 0; i < count; i++) {
224 if (!__state)
225 __state = q->slsb.val[bufnr];
226 else if (q->slsb.val[bufnr] != __state)
227 break;
228 bufnr = next_buf(bufnr);
229 }
230 *state = __state;
231 return i;
232}
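/*
 * Illustrative example (editor's sketch, not from the original source):
 * for the non-QEBSM loop above, SLSB states PRIMED, PRIMED, EMPTY starting
 * at bufnr with count 3 stop at the first differing state, so the function
 * returns 2 and sets *state to SLSB_P_INPUT_PRIMED.
 */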
233
234inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
235 unsigned char *state, int auto_ack)
236{
237 return get_buf_states(q, bufnr, state, 1, auto_ack);
238}
239
240/* wrap-around safe setting of slsb states, returns number of changed buffers */
241static inline int set_buf_states(struct qdio_q *q, int bufnr,
242 unsigned char state, int count)
243{
244 int i;
245
246 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
247 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
248
249 if (is_qebsm(q))
250 return qdio_do_sqbs(q, state, bufnr, count);
251
252 for (i = 0; i < count; i++) {
253 xchg(&q->slsb.val[bufnr], state);
254 bufnr = next_buf(bufnr);
255 }
256 return count;
257}
258
259static inline int set_buf_state(struct qdio_q *q, int bufnr,
260 unsigned char state)
261{
262 return set_buf_states(q, bufnr, state, 1);
263}
264
265/* set slsb states to initial state */
266void qdio_init_buf_states(struct qdio_irq *irq_ptr)
267{
268 struct qdio_q *q;
269 int i;
270
271 for_each_input_queue(irq_ptr, q, i)
272 set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
273 QDIO_MAX_BUFFERS_PER_Q);
274 for_each_output_queue(irq_ptr, q, i)
275 set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
276 QDIO_MAX_BUFFERS_PER_Q);
277}
278
279static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
280 unsigned int input)
281{
282 int cc;
283
284 if (!need_siga_sync(q))
285 return 0;
286
287 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
288 qdio_perf_stat_inc(&perf_stats.siga_sync);
289
290 cc = do_siga_sync(q->irq_ptr->schid, output, input);
291 if (cc)
292 DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
293 return cc;
294}
295
296inline int qdio_siga_sync_q(struct qdio_q *q)
297{
298 if (q->is_input_q)
299 return qdio_siga_sync(q, 0, q->mask);
300 else
301 return qdio_siga_sync(q, q->mask, 0);
302}
303
304static inline int qdio_siga_sync_out(struct qdio_q *q)
305{
306 return qdio_siga_sync(q, ~0U, 0);
307}
308
309static inline int qdio_siga_sync_all(struct qdio_q *q)
310{
311 return qdio_siga_sync(q, ~0U, ~0U);
312}
313
314static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
315{
316 unsigned long schid;
317 unsigned int fc = 0;
318 u64 start_time = 0;
319 int cc;
320
321 if (q->u.out.use_enh_siga)
322 fc = 3;
323
324 if (is_qebsm(q)) {
325 schid = q->irq_ptr->sch_token;
326 fc |= 0x80;
327 }
328 else
329 schid = *((u32 *)&q->irq_ptr->schid);
330
331again:
332 cc = do_siga_output(schid, q->mask, busy_bit, fc);
333
334 /* hipersocket busy condition */
335 if (*busy_bit) {
336 WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
337
338 if (!start_time) {
339 start_time = get_usecs();
340 goto again;
341 }
342 if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
343 goto again;
344 }
345 return cc;
346}
347
348static inline int qdio_siga_input(struct qdio_q *q)
349{
350 int cc;
351
352 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
353 qdio_perf_stat_inc(&perf_stats.siga_in);
354
355 cc = do_siga_input(q->irq_ptr->schid, q->mask);
356 if (cc)
357 DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
358 return cc;
359}
360
361/* called from thinint inbound handler */
362void qdio_sync_after_thinint(struct qdio_q *q)
363{
364 if (pci_out_supported(q)) {
365 if (need_siga_sync_thinint(q))
366 qdio_siga_sync_all(q);
367 else if (need_siga_sync_out_thinint(q))
368 qdio_siga_sync_out(q);
369 } else
370 qdio_siga_sync_q(q);
371}
372
373inline void qdio_stop_polling(struct qdio_q *q)
374{
375 if (!q->u.in.polling)
376 return;
377
378 q->u.in.polling = 0;
379 qdio_perf_stat_inc(&perf_stats.debug_stop_polling);
380
381 /* show the card that we are not polling anymore */
382 if (is_qebsm(q)) {
383 set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT,
384 q->u.in.ack_count);
385 q->u.in.ack_count = 0;
386 } else
387 set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
388}
389
390static void announce_buffer_error(struct qdio_q *q, int count)
391{
392 q->qdio_error |= QDIO_ERROR_SLSB_STATE;
393
394 /* special handling for no target buffer empty */
395 if ((!q->is_input_q &&
396 (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
397 qdio_perf_stat_inc(&perf_stats.outbound_target_full);
398 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%3d",
399 q->first_to_check);
400 return;
401 }
402
403 DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
404 DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
405 DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
406 DBF_ERROR("F14:%2x F15:%2x",
407 q->sbal[q->first_to_check]->element[14].flags & 0xff,
408 q->sbal[q->first_to_check]->element[15].flags & 0xff);
409}
410
411static inline void inbound_primed(struct qdio_q *q, int count)
412{
413 int new;
414
415 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %3d", count);
416
417 /* for QEBSM the ACK was already set by EQBS */
418 if (is_qebsm(q)) {
419 if (!q->u.in.polling) {
420 q->u.in.polling = 1;
421 q->u.in.ack_count = count;
422 q->last_move_ftc = q->first_to_check;
423 return;
424 }
425
426 /* delete the previous ACKs */
427 set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT,
428 q->u.in.ack_count);
429 q->u.in.ack_count = count;
430 q->last_move_ftc = q->first_to_check;
431 return;
432 }
433
434 /*
435 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
436 * or by the next inbound run.
437 */
438 new = add_buf(q->first_to_check, count - 1);
439 if (q->u.in.polling) {
440 /* reset the previous ACK but first set the new one */
441 set_buf_state(q, new, SLSB_P_INPUT_ACK);
442 set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
443 }
444 else {
445 q->u.in.polling = 1;
446 set_buf_state(q, q->first_to_check, SLSB_P_INPUT_ACK);
447 }
448
449 q->last_move_ftc = new;
450 count--;
451 if (!count)
452 return;
453
454 /*
455 * Need to change all PRIMED buffers to NOT_INIT, otherwise
456 * we're losing initiative in the thinint code.
457 */
458 set_buf_states(q, next_buf(q->first_to_check), SLSB_P_INPUT_NOT_INIT,
459 count);
460}
461
462static int get_inbound_buffer_frontier(struct qdio_q *q)
463{
464 int count, stop;
465 unsigned char state;
466
467 /*
468 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
469 * would return 0.
470 */
471 count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
472 stop = add_buf(q->first_to_check, count);
473
474 /*
475 * No siga-sync here: either a PCI interrupt or our processing after
476 * a thin interrupt will sync the queues.
477 */
478
479 /* need to set count to 1 for non-qebsm */
480 if (!is_qebsm(q))
481 count = 1;
482
483check_next:
484 if (q->first_to_check == stop)
485 goto out;
486
487 count = get_buf_states(q, q->first_to_check, &state, count, 1);
488 if (!count)
489 goto out;
490
491 switch (state) {
492 case SLSB_P_INPUT_PRIMED:
493 inbound_primed(q, count);
494 /*
495 * No siga-sync needed for non-qebsm here, as the inbound queue
496 * will be synced on the next siga-r; alternatively,
497 * tiqdio_is_inbound_q_done will do the siga-sync.
498 */
499 q->first_to_check = add_buf(q->first_to_check, count);
500 atomic_sub(count, &q->nr_buf_used);
501 goto check_next;
502 case SLSB_P_INPUT_ERROR:
503 announce_buffer_error(q, count);
504 /* process the buffer, the upper layer will take care of it */
505 q->first_to_check = add_buf(q->first_to_check, count);
506 atomic_sub(count, &q->nr_buf_used);
507 break;
508 case SLSB_CU_INPUT_EMPTY:
509 case SLSB_P_INPUT_NOT_INIT:
510 case SLSB_P_INPUT_ACK:
511 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
512 break;
513 default:
514 BUG();
515 }
516out:
517 return q->first_to_check;
518}
519
520int qdio_inbound_q_moved(struct qdio_q *q)
521{
522 int bufnr;
523
524 bufnr = get_inbound_buffer_frontier(q);
525
526 if ((bufnr != q->last_move_ftc) || q->qdio_error) {
527 if (!need_siga_sync(q) && !pci_out_supported(q))
528 q->u.in.timestamp = get_usecs();
529
530 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
531 return 1;
532 } else
533 return 0;
534}
535
536static int qdio_inbound_q_done(struct qdio_q *q)
537{
538 unsigned char state = 0;
539
540 if (!atomic_read(&q->nr_buf_used))
541 return 1;
542
543 /*
544 * We need that one for synchronization with the adapter, as it
545 * does a kind of PCI avoidance.
546 */
547 qdio_siga_sync_q(q);
548
549 get_buf_state(q, q->first_to_check, &state, 0);
550 if (state == SLSB_P_INPUT_PRIMED)
551 /* we got something to do */
552 return 0;
553
554 /* on VM, we don't poll, so the q is always done here */
555 if (need_siga_sync(q) || pci_out_supported(q))
556 return 1;
557
558 /*
559 * At this point we know that inbound first_to_check
560 * has (probably) not moved (see qdio_inbound_processing).
561 */
562 if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
563 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
564 q->first_to_check);
565 return 1;
566 } else {
567 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
568 q->first_to_check);
569 return 0;
570 }
571}
572
573void qdio_kick_inbound_handler(struct qdio_q *q)
574{
575 int count, start, end;
576
577 qdio_perf_stat_inc(&perf_stats.inbound_handler);
578
579 start = q->first_to_kick;
580 end = q->first_to_check;
581 if (end >= start)
582 count = end - start;
583 else
584 count = end + QDIO_MAX_BUFFERS_PER_Q - start;
585
586 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count);
587
588 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
589 return;
590
591 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr,
592 start, count, q->irq_ptr->int_parm);
593
594 /* for the next time */
595 q->first_to_kick = q->first_to_check;
596 q->qdio_error = 0;
597}
598
599static void __qdio_inbound_processing(struct qdio_q *q)
600{
601 qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
602again:
603 if (!qdio_inbound_q_moved(q))
604 return;
605
606 qdio_kick_inbound_handler(q);
607
608 if (!qdio_inbound_q_done(q))
609 /* means poll time is not yet over */
610 goto again;
611
612 qdio_stop_polling(q);
613 /*
614 * We need to check again to not lose initiative after
615 * resetting the ACK state.
616 */
617 if (!qdio_inbound_q_done(q))
618 goto again;
619}
620
621/* inbound tasklet */
622void qdio_inbound_processing(unsigned long data)
623{
624 struct qdio_q *q = (struct qdio_q *)data;
625 __qdio_inbound_processing(q);
626}
627
628static int get_outbound_buffer_frontier(struct qdio_q *q)
629{
630 int count, stop;
631 unsigned char state;
632
633 if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
634 (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
635 qdio_siga_sync_q(q);
636
637 /*
638 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
639 * would return 0.
640 */
641 count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
642 stop = add_buf(q->first_to_check, count);
643
644 /* need to set count to 1 for non-qebsm */
645 if (!is_qebsm(q))
646 count = 1;
647
648check_next:
649 if (q->first_to_check == stop)
650 return q->first_to_check;
651
652 count = get_buf_states(q, q->first_to_check, &state, count, 0);
653 if (!count)
654 return q->first_to_check;
655
656 switch (state) {
657 case SLSB_P_OUTPUT_EMPTY:
658 /* the adapter got it */
659 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %3d", q->nr, count);
660
661 atomic_sub(count, &q->nr_buf_used);
662 q->first_to_check = add_buf(q->first_to_check, count);
663 /*
664 * We fetch all buffer states at once. get_buf_states may return
665 * fewer buffers than requested. For QEBSM we do not loop.
666 */
667 if (is_qebsm(q))
668 break;
669 goto check_next;
670 case SLSB_P_OUTPUT_ERROR:
671 announce_buffer_error(q, count);
672 /* process the buffer, the upper layer will take care of it */
673 q->first_to_check = add_buf(q->first_to_check, count);
674 atomic_sub(count, &q->nr_buf_used);
675 break;
676 case SLSB_CU_OUTPUT_PRIMED:
677 /* the adapter has not fetched the output yet */
678 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
679 break;
680 case SLSB_P_OUTPUT_NOT_INIT:
681 case SLSB_P_OUTPUT_HALTED:
682 break;
683 default:
684 BUG();
685 }
686 return q->first_to_check;
687}
688
689/* all buffers processed? */
690static inline int qdio_outbound_q_done(struct qdio_q *q)
691{
692 return atomic_read(&q->nr_buf_used) == 0;
693}
694
695static inline int qdio_outbound_q_moved(struct qdio_q *q)
696{
697 int bufnr;
698
699 bufnr = get_outbound_buffer_frontier(q);
700
701 if ((bufnr != q->last_move_ftc) || q->qdio_error) {
702 q->last_move_ftc = bufnr;
703 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
704 return 1;
705 } else
706 return 0;
707}
708
709static void qdio_kick_outbound_q(struct qdio_q *q)
710{
711 unsigned int busy_bit;
712 int cc;
713
714 if (!need_siga_out(q))
715 return;
716
717 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
718 qdio_perf_stat_inc(&perf_stats.siga_out);
719
720 cc = qdio_siga_output(q, &busy_bit);
721 switch (cc) {
722 case 0:
723 break;
724 case 2:
725 if (busy_bit) {
726 DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
727 q->qdio_error = cc | QDIO_ERROR_SIGA_BUSY;
728 } else {
729 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d",
730 q->nr);
731 q->qdio_error = cc;
732 }
733 break;
734 case 1:
735 case 3:
736 DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
737 q->qdio_error = cc;
738 break;
739 }
740}
741
742static void qdio_kick_outbound_handler(struct qdio_q *q)
743{
744 int start, end, count;
745
746 start = q->first_to_kick;
747 end = q->last_move_ftc;
748 if (end >= start)
749 count = end - start;
750 else
751 count = end + QDIO_MAX_BUFFERS_PER_Q - start;
752
753 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kickouth: %1d", q->nr);
754 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count);
755
756 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
757 return;
758
759 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
760 q->irq_ptr->int_parm);
761
762 /* for the next time: */
763 q->first_to_kick = q->last_move_ftc;
764 q->qdio_error = 0;
765}
766
767static void __qdio_outbound_processing(struct qdio_q *q)
768{
769 unsigned long flags;
770
771 qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
772 spin_lock_irqsave(&q->lock, flags);
773
774 BUG_ON(atomic_read(&q->nr_buf_used) < 0);
775
776 if (qdio_outbound_q_moved(q))
777 qdio_kick_outbound_handler(q);
778
779 spin_unlock_irqrestore(&q->lock, flags);
780
781 if (queue_type(q) == QDIO_ZFCP_QFMT)
782 if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
783 goto sched;
784
785 /* bail out for HiperSockets unicast queues */
786 if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
787 return;
788
789 if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
790 (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
791 goto sched;
792
793 if (q->u.out.pci_out_enabled)
794 return;
795
796 /*
797 * Now we know that queue type is either qeth without pci enabled
798 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
799 * EMPTY is noticed and outbound_handler is called after some time.
800 */
801 if (qdio_outbound_q_done(q))
802 del_timer(&q->u.out.timer);
803 else {
804 if (!timer_pending(&q->u.out.timer)) {
805 mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
806 qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
807 }
808 }
809 return;
810
811sched:
812 if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
813 return;
814 tasklet_schedule(&q->tasklet);
815}
816
817/* outbound tasklet */
818void qdio_outbound_processing(unsigned long data)
819{
820 struct qdio_q *q = (struct qdio_q *)data;
821 __qdio_outbound_processing(q);
822}
823
824void qdio_outbound_timer(unsigned long data)
825{
826 struct qdio_q *q = (struct qdio_q *)data;
827
828 if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
829 return;
830 tasklet_schedule(&q->tasklet);
831}
832
833/* called from thinint inbound tasklet */
834void qdio_check_outbound_after_thinint(struct qdio_q *q)
835{
836 struct qdio_q *out;
837 int i;
838
839 if (!pci_out_supported(q))
840 return;
841
842 for_each_output_queue(q->irq_ptr, out, i)
843 if (!qdio_outbound_q_done(out))
844 tasklet_schedule(&out->tasklet);
845}
846
847static inline void qdio_set_state(struct qdio_irq *irq_ptr,
848 enum qdio_irq_states state)
849{
850 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);
851
852 irq_ptr->state = state;
853 mb();
854}
855
856static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
857{
858 if (irb->esw.esw0.erw.cons) {
859 DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
860 DBF_ERROR_HEX(irb, 64);
861 DBF_ERROR_HEX(irb->ecw, 64);
862 }
863}
864
865/* PCI interrupt handler */
866static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
867{
868 int i;
869 struct qdio_q *q;
870
871 if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
872 return;
873
874 qdio_perf_stat_inc(&perf_stats.pci_int);
875
876 for_each_input_queue(irq_ptr, q, i)
877 tasklet_schedule(&q->tasklet);
878
879 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
880 return;
881
882 for_each_output_queue(irq_ptr, q, i) {
883 if (qdio_outbound_q_done(q))
884 continue;
885
886 if (!siga_syncs_out_pci(q))
887 qdio_siga_sync_q(q);
888
889 tasklet_schedule(&q->tasklet);
890 }
891}
892
893static void qdio_handle_activate_check(struct ccw_device *cdev,
894 unsigned long intparm, int cstat, int dstat)
895{
896 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
897 struct qdio_q *q;
898
899 DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
900 DBF_ERROR("intp :%lx", intparm);
901 DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
902
903 if (irq_ptr->nr_input_qs) {
904 q = irq_ptr->input_qs[0];
905 } else if (irq_ptr->nr_output_qs) {
906 q = irq_ptr->output_qs[0];
907 } else {
908 dump_stack();
909 goto no_handler;
910 }
911 q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
912 0, -1, -1, irq_ptr->int_parm);
913no_handler:
914 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
915}
916
917static void qdio_call_shutdown(struct work_struct *work)
918{
919 struct ccw_device_private *priv;
920 struct ccw_device *cdev;
921
922 priv = container_of(work, struct ccw_device_private, kick_work);
923 cdev = priv->cdev;
924 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
925 put_device(&cdev->dev);
926}
927
928static void qdio_int_error(struct ccw_device *cdev)
929{
930 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
931
932 switch (irq_ptr->state) {
933 case QDIO_IRQ_STATE_INACTIVE:
934 case QDIO_IRQ_STATE_CLEANUP:
935 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
936 break;
937 case QDIO_IRQ_STATE_ESTABLISHED:
938 case QDIO_IRQ_STATE_ACTIVE:
939 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
940 if (get_device(&cdev->dev)) {
941 /* Can't call shutdown from interrupt context. */
942 PREPARE_WORK(&cdev->private->kick_work,
943 qdio_call_shutdown);
944 queue_work(ccw_device_work, &cdev->private->kick_work);
945 }
946 break;
947 default:
948 WARN_ON(1);
949 }
950 wake_up(&cdev->private->wait_q);
951}
952
953static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat,
954 int dstat)
955{
956 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
957
958 if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
959 DBF_ERROR("EQ:ck con");
960 goto error;
961 }
962
963 if (!(dstat & DEV_STAT_DEV_END)) {
964 DBF_ERROR("EQ:no dev");
965 goto error;
966 }
967
968 if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) {
969 DBF_ERROR("EQ: bad io");
970 goto error;
971 }
972 return 0;
973error:
974 DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
975 DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
976
977 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
978 return 1;
979}
980
981static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
982 int dstat)
983{
984 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
985
986 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
987 if (!qdio_establish_check_errors(cdev, cstat, dstat))
988 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
989}
990
991/* qdio interrupt handler */
992void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
993 struct irb *irb)
994{
995 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
996 int cstat, dstat;
997
998 qdio_perf_stat_inc(&perf_stats.qdio_int);
999
1000 if (!intparm || !irq_ptr) {
1001 DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
1002 return;
1003 }
1004
1005 if (IS_ERR(irb)) {
1006 switch (PTR_ERR(irb)) {
1007 case -EIO:
1008 DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
1009 return;
1010 case -ETIMEDOUT:
1011 DBF_ERROR("%4x IO timeout", irq_ptr->schid.sch_no);
1012 qdio_int_error(cdev);
1013 return;
1014 default:
1015 WARN_ON(1);
1016 return;
1017 }
1018 }
1019 qdio_irq_check_sense(irq_ptr, irb);
1020
1021 cstat = irb->scsw.cmd.cstat;
1022 dstat = irb->scsw.cmd.dstat;
1023
1024 switch (irq_ptr->state) {
1025 case QDIO_IRQ_STATE_INACTIVE:
1026 qdio_establish_handle_irq(cdev, cstat, dstat);
1027 break;
1028
1029 case QDIO_IRQ_STATE_CLEANUP:
1030 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1031 break;
1032
1033 case QDIO_IRQ_STATE_ESTABLISHED:
1034 case QDIO_IRQ_STATE_ACTIVE:
1035 if (cstat & SCHN_STAT_PCI) {
1036 qdio_int_handler_pci(irq_ptr);
1037 /* no state change so no need to wake up wait_q */
1038 return;
1039 }
1040 if ((cstat & ~SCHN_STAT_PCI) || dstat) {
1041 qdio_handle_activate_check(cdev, intparm, cstat,
1042 dstat);
1043 break;
1044 }
1045 default:
1046 WARN_ON(1);
1047 }
1048 wake_up(&cdev->private->wait_q);
1049}
1050
1051/**
1052 * qdio_get_ssqd_desc - get qdio subchannel description
1053 * @cdev: ccw device to get description for
1054 * @data: where to store the ssqd
1055 *
1056 * Returns 0 or an error code. The results of the chsc are stored in the
1057 * specified structure.
1058 */
1059int qdio_get_ssqd_desc(struct ccw_device *cdev,
1060 struct qdio_ssqd_desc *data)
1061{
1062
1063 if (!cdev || !cdev->private)
1064 return -EINVAL;
1065
1066 DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
1067 return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
1068}
1069EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
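/*
 * Illustrative usage sketch (editor's addition, not part of this driver):
 * how a device driver holding an online ccw device might query the
 * subchannel description. The function and variable names are hypothetical.
 */
#if 0
static int example_query_ssqd(struct ccw_device *cdev)
{
	struct qdio_ssqd_desc ssqd;
	int rc;

	rc = qdio_get_ssqd_desc(cdev, &ssqd);
	if (rc)
		return rc;
	/* the caller may now inspect the chsc results stored in ssqd */
	return 0;
}
#endif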
1070
1071/**
1072 * qdio_cleanup - shutdown queues and free data structures
1073 * @cdev: associated ccw device
1074 * @how: use halt or clear to shutdown
1075 *
1076 * This function calls qdio_shutdown() for @cdev with method @how
1077 * and then qdio_free(). The qdio_free() return value is ignored since
1078 * !irq_ptr is already checked.
1079 */
1080int qdio_cleanup(struct ccw_device *cdev, int how)
1081{
1082 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1083 int rc;
1084
1085 if (!irq_ptr)
1086 return -ENODEV;
1087
1088 rc = qdio_shutdown(cdev, how);
1089
1090 qdio_free(cdev);
1091 return rc;
1092}
1093EXPORT_SYMBOL_GPL(qdio_cleanup);
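/*
 * Illustrative teardown sketch (editor's addition): qdio_cleanup() above is
 * equivalent to the explicit two-step sequence below. Note that qdio_free()
 * is called even if qdio_shutdown() returned an error; qdio_free() only
 * bails out if no qdio data is attached to the device.
 */
#if 0
static int example_teardown(struct ccw_device *cdev)
{
	int rc;

	rc = qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
	qdio_free(cdev);	/* free data structures regardless of rc */
	return rc;
}
#endif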
1094
1095static void qdio_shutdown_queues(struct ccw_device *cdev)
1096{
1097 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1098 struct qdio_q *q;
1099 int i;
1100
1101 for_each_input_queue(irq_ptr, q, i)
1102 tasklet_kill(&q->tasklet);
1103
1104 for_each_output_queue(irq_ptr, q, i) {
1105 del_timer(&q->u.out.timer);
1106 tasklet_kill(&q->tasklet);
1107 }
1108}
1109
1110/**
1111 * qdio_shutdown - shut down a qdio subchannel
1112 * @cdev: associated ccw device
1113 * @how: use halt or clear to shutdown
1114 */
1115int qdio_shutdown(struct ccw_device *cdev, int how)
1116{
1117 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1118 int rc;
1119 unsigned long flags;
1120
1121 if (!irq_ptr)
1122 return -ENODEV;
1123
1124 BUG_ON(irqs_disabled());
1125 DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
1126
1127 mutex_lock(&irq_ptr->setup_mutex);
1128 /*
1129 * Subchannel was already shot down. We cannot prevent being called
1130 * twice since cio may trigger a shutdown asynchronously.
1131 */
1132 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
1133 mutex_unlock(&irq_ptr->setup_mutex);
1134 return 0;
1135 }
1136
1137 /*
1138 * Indicate that the device is going down. Scheduling the queue
1139 * tasklets is forbidden from here on.
1140 */
1141 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
1142
1143 tiqdio_remove_input_queues(irq_ptr);
1144 qdio_shutdown_queues(cdev);
1145 qdio_shutdown_debug_entries(irq_ptr, cdev);
1146
1147 /* cleanup subchannel */
1148 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1149
1150 if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
1151 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
1152 else
1153 /* default behaviour is halt */
1154 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
1155 if (rc) {
1156 DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
1157 DBF_ERROR("rc:%4d", rc);
1158 goto no_cleanup;
1159 }
1160
1161 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
1162 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1163 wait_event_interruptible_timeout(cdev->private->wait_q,
1164 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
1165 irq_ptr->state == QDIO_IRQ_STATE_ERR,
1166 10 * HZ);
1167 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1168
1169no_cleanup:
1170 qdio_shutdown_thinint(irq_ptr);
1171
1172 /* restore interrupt handler */
1173 if ((void *)cdev->handler == (void *)qdio_int_handler)
1174 cdev->handler = irq_ptr->orig_handler;
1175 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1176
1177 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1178 mutex_unlock(&irq_ptr->setup_mutex);
1179 if (rc)
1180 return rc;
1181 return 0;
1182}
1183EXPORT_SYMBOL_GPL(qdio_shutdown);
1184
1185/**
1186 * qdio_free - free data structures for a qdio subchannel
1187 * @cdev: associated ccw device
1188 */
1189int qdio_free(struct ccw_device *cdev)
1190{
1191 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1192
1193 if (!irq_ptr)
1194 return -ENODEV;
1195
1196 DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
1197 mutex_lock(&irq_ptr->setup_mutex);
1198
1199 if (irq_ptr->debug_area != NULL) {
1200 debug_unregister(irq_ptr->debug_area);
1201 irq_ptr->debug_area = NULL;
1202 }
1203 cdev->private->qdio_data = NULL;
1204 mutex_unlock(&irq_ptr->setup_mutex);
1205
1206 qdio_release_memory(irq_ptr);
1207 return 0;
1208}
1209EXPORT_SYMBOL_GPL(qdio_free);
1210
1211/**
1212 * qdio_initialize - allocate and establish queues for a qdio subchannel
1213 * @init_data: initialization data
1214 *
1215 * This function first allocates queues via qdio_allocate() and on success
1216 * establishes them via qdio_establish().
1217 */
1218int qdio_initialize(struct qdio_initialize *init_data)
1219{
1220 int rc;
1221
1222 rc = qdio_allocate(init_data);
1223 if (rc)
1224 return rc;
1225
1226 rc = qdio_establish(init_data);
1227 if (rc)
1228 qdio_free(init_data->cdev);
1229 return rc;
1230}
1231EXPORT_SYMBOL_GPL(qdio_initialize);
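/*
 * Illustrative bring-up sketch (editor's addition): the usual caller
 * sequence is qdio_initialize() (or qdio_allocate() plus qdio_establish())
 * followed by qdio_activate(). The init_data structure is assumed to be
 * filled in by the driver (queue counts, handlers, SBAL address arrays);
 * the example function name is hypothetical.
 */
#if 0
static int example_bring_up(struct qdio_initialize *init_data)
{
	int rc;

	rc = qdio_initialize(init_data);	/* allocate + establish */
	if (rc)
		return rc;

	rc = qdio_activate(init_data->cdev);
	if (rc)
		qdio_cleanup(init_data->cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
	return rc;
}
#endif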
1232
1233/**
1234 * qdio_allocate - allocate qdio queues and associated data
1235 * @init_data: initialization data
1236 */
1237int qdio_allocate(struct qdio_initialize *init_data)
1238{
1239 struct qdio_irq *irq_ptr;
1240
1241 DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);
1242
1243 if ((init_data->no_input_qs && !init_data->input_handler) ||
1244 (init_data->no_output_qs && !init_data->output_handler))
1245 return -EINVAL;
1246
1247 if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
1248 (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
1249 return -EINVAL;
1250
1251 if ((!init_data->input_sbal_addr_array) ||
1252 (!init_data->output_sbal_addr_array))
1253 return -EINVAL;
1254
1255 /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
1256 irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1257 if (!irq_ptr)
1258 goto out_err;
1259
1260 mutex_init(&irq_ptr->setup_mutex);
1261 qdio_allocate_dbf(init_data, irq_ptr);
1262
1263 /*
1264 * Allocate a page for the chsc calls in qdio_establish.
1265 * Must be pre-allocated since a zfcp recovery will call
1266 * qdio_establish. In case of low memory and swap on a zfcp disk
1267 * we may not be able to allocate memory otherwise.
1268 */
1269 irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
1270 if (!irq_ptr->chsc_page)
1271 goto out_rel;
1272
1273 /* qdr is used in ccw1.cda which is u32 */
1274 irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1275 if (!irq_ptr->qdr)
1276 goto out_rel;
1277 WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
1278
1279 if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
1280 init_data->no_output_qs))
1281 goto out_rel;
1282
1283 init_data->cdev->private->qdio_data = irq_ptr;
1284 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1285 return 0;
1286out_rel:
1287 qdio_release_memory(irq_ptr);
1288out_err:
1289 return -ENOMEM;
1290}
1291EXPORT_SYMBOL_GPL(qdio_allocate);
1292
1293/**
1294 * qdio_establish - establish queues on a qdio subchannel
1295 * @init_data: initialization data
1296 */
1297int qdio_establish(struct qdio_initialize *init_data)
1298{
1299 struct qdio_irq *irq_ptr;
1300 struct ccw_device *cdev = init_data->cdev;
1301 unsigned long saveflags;
1302 int rc;
1303
1304 DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);
1305
1306 irq_ptr = cdev->private->qdio_data;
1307 if (!irq_ptr)
1308 return -ENODEV;
1309
1310 if (cdev->private->state != DEV_STATE_ONLINE)
1311 return -EINVAL;
1312
1313 mutex_lock(&irq_ptr->setup_mutex);
1314 qdio_setup_irq(init_data);
1315
1316 rc = qdio_establish_thinint(irq_ptr);
1317 if (rc) {
1318 mutex_unlock(&irq_ptr->setup_mutex);
1319 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1320 return rc;
1321 }
1322
1323 /* establish q */
1324 irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
1325 irq_ptr->ccw.flags = CCW_FLAG_SLI;
1326 irq_ptr->ccw.count = irq_ptr->equeue.count;
1327 irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
1328
1329 spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
1330 ccw_device_set_options_mask(cdev, 0);
1331
1332 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
1333 if (rc) {
1334 DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
1335 DBF_ERROR("rc:%4x", rc);
1336 }
1337 spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
1338
1339 if (rc) {
1340 mutex_unlock(&irq_ptr->setup_mutex);
1341 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1342 return rc;
1343 }
1344
1345 wait_event_interruptible_timeout(cdev->private->wait_q,
1346 irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
1347 irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
1348
1349 if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
1350 mutex_unlock(&irq_ptr->setup_mutex);
1351 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1352 return -EIO;
1353 }
1354
1355 qdio_setup_ssqd_info(irq_ptr);
1356 DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
1357 DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);
1358
1359 /* qebsm is now setup if available, initialize buffer states */
1360 qdio_init_buf_states(irq_ptr);
1361
1362 mutex_unlock(&irq_ptr->setup_mutex);
1363 qdio_print_subchannel_info(irq_ptr, cdev);
1364 qdio_setup_debug_entries(irq_ptr, cdev);
1365 return 0;
1366}
1367EXPORT_SYMBOL_GPL(qdio_establish);
1368
1369/**
1370 * qdio_activate - activate queues on a qdio subchannel
1371 * @cdev: associated cdev
1372 */
1373int qdio_activate(struct ccw_device *cdev)
1374{
1375 struct qdio_irq *irq_ptr;
1376 int rc;
1377 unsigned long saveflags;
1378
1379 DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);
1380
1381 irq_ptr = cdev->private->qdio_data;
1382 if (!irq_ptr)
1383 return -ENODEV;
1384
1385 if (cdev->private->state != DEV_STATE_ONLINE)
1386 return -EINVAL;
1387
1388 mutex_lock(&irq_ptr->setup_mutex);
1389 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
1390 rc = -EBUSY;
1391 goto out;
1392 }
1393
1394 irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
1395 irq_ptr->ccw.flags = CCW_FLAG_SLI;
1396 irq_ptr->ccw.count = irq_ptr->aqueue.count;
1397 irq_ptr->ccw.cda = 0;
1398
1399 spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
1400 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
1401
1402 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
1403 0, DOIO_DENY_PREFETCH);
1404 if (rc) {
1405 DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
1406 DBF_ERROR("rc:%4x", rc);
1407 }
1408 spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
1409
1410 if (rc)
1411 goto out;
1412
1413 if (is_thinint_irq(irq_ptr))
1414 tiqdio_add_input_queues(irq_ptr);
1415
1416 /* wait for subchannel to become active */
1417 msleep(5);
1418
1419 switch (irq_ptr->state) {
1420 case QDIO_IRQ_STATE_STOPPED:
1421 case QDIO_IRQ_STATE_ERR:
1422 rc = -EIO;
1423 break;
1424 default:
1425 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
1426 rc = 0;
1427 }
1428out:
1429 mutex_unlock(&irq_ptr->setup_mutex);
1430 return rc;
1431}
1432EXPORT_SYMBOL_GPL(qdio_activate);
1433
1434static inline int buf_in_between(int bufnr, int start, int count)
1435{
1436 int end = add_buf(start, count);
1437
1438 if (end > start) {
1439 if (bufnr >= start && bufnr < end)
1440 return 1;
1441 else
1442 return 0;
1443 }
1444
1445 /* wrap-around case */
1446 if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
1447 (bufnr < end))
1448 return 1;
1449 else
1450 return 0;
1451}
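/*
 * Illustrative example (editor's addition): with 128 buffers per queue,
 * start = 120 and count = 16 give end = add_buf(120, 16) = 8, so the
 * wrap-around branch above treats buffers 120..127 and 0..7 as "in between".
 */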
1452
1453/**
1454 * handle_inbound - reset processed input buffers
1455 * @q: queue containing the buffers
1456 * @callflags: flags
1457 * @bufnr: first buffer to process
1458 * @count: how many buffers are emptied
1459 */
1460static void handle_inbound(struct qdio_q *q, unsigned int callflags,
1461 int bufnr, int count)
1462{
1463 int used, cc, diff;
1464
1465 if (!q->u.in.polling)
1466 goto set;
1467
1468 /* protect against stop polling setting an ACK for an emptied slsb */
1469 if (count == QDIO_MAX_BUFFERS_PER_Q) {
1470 /* overwriting everything, just delete polling status */
1471 q->u.in.polling = 0;
1472 q->u.in.ack_count = 0;
1473 goto set;
1474 } else if (buf_in_between(q->last_move_ftc, bufnr, count)) {
1475 if (is_qebsm(q)) {
1476 /* partial overwrite, just update last_move_ftc */
1477 diff = add_buf(bufnr, count);
1478 diff = sub_buf(diff, q->last_move_ftc);
1479 q->u.in.ack_count -= diff;
1480 if (q->u.in.ack_count <= 0) {
1481 q->u.in.polling = 0;
1482 q->u.in.ack_count = 0;
1483 /* TODO: must we set last_move_ftc to something meaningful? */
1484 goto set;
1485 }
1486 q->last_move_ftc = add_buf(q->last_move_ftc, diff);
1487 }
1488 else
1489 /* the only ACK will be deleted, so stop polling */
1490 q->u.in.polling = 0;
1491 }
1492
1493set:
1494 count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
1495
1496 used = atomic_add_return(count, &q->nr_buf_used) - count;
1497 BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
1498
1499 /* no need to signal as long as the adapter had free buffers */
1500 if (used)
1501 return;
1502
1503 if (need_siga_in(q)) {
1504 cc = qdio_siga_input(q);
1505 if (cc)
1506 q->qdio_error = cc;
1507 }
1508}
1509
1510/**
1511 * handle_outbound - process filled outbound buffers
1512 * @q: queue containing the buffers
1513 * @callflags: flags
1514 * @bufnr: first buffer to process
1515 * @count: how many buffers are filled
1516 */
1517static void handle_outbound(struct qdio_q *q, unsigned int callflags,
1518 int bufnr, int count)
1519{
1520 unsigned char state;
1521 int used;
1522
1523 qdio_perf_stat_inc(&perf_stats.outbound_handler);
1524
1525 count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
1526 used = atomic_add_return(count, &q->nr_buf_used);
1527 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
1528
1529 if (callflags & QDIO_FLAG_PCI_OUT)
1530 q->u.out.pci_out_enabled = 1;
1531 else
1532 q->u.out.pci_out_enabled = 0;
1533
1534 if (queue_type(q) == QDIO_IQDIO_QFMT) {
1535 if (multicast_outbound(q))
1536 qdio_kick_outbound_q(q);
1537 else
1538 if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
1539 (count > 1) &&
1540 (count <= q->irq_ptr->ssqd_desc.mmwc)) {
1541 /* exploit enhanced SIGA */
1542 q->u.out.use_enh_siga = 1;
1543 qdio_kick_outbound_q(q);
1544 } else {
1545 /*
1546 * One siga-w per buffer required for unicast
1547 * HiperSockets.
1548 */
1549 q->u.out.use_enh_siga = 0;
1550 while (count--)
1551 qdio_kick_outbound_q(q);
1552 }
1553
1554 /* report CC=2 conditions synchronously */
1555 if (q->qdio_error)
1556 __qdio_outbound_processing(q);
1557 goto out;
1558 }
1559
1560 if (need_siga_sync(q)) {
1561 qdio_siga_sync_q(q);
1562 goto out;
1563 }
1564
1565 /* try to fast requeue buffers */
1566 get_buf_state(q, prev_buf(bufnr), &state, 0);
1567 if (state != SLSB_CU_OUTPUT_PRIMED)
1568 qdio_kick_outbound_q(q);
1569 else {
1570 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req");
1571 qdio_perf_stat_inc(&perf_stats.fast_requeue);
1572 }
1573out:
1574 tasklet_schedule(&q->tasklet);
1575}
1576
1577/**
1578 * do_QDIO - process input or output buffers
1579 * @cdev: associated ccw_device for the qdio subchannel
1580 * @callflags: input or output and special flags from the program
1581 * @q_nr: queue number
1582 * @bufnr: buffer number
1583 * @count: how many buffers to process
1584 */
1585int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1586 int q_nr, int bufnr, int count)
1587{
1588 struct qdio_irq *irq_ptr;
1589
1590 if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
1591 (count > QDIO_MAX_BUFFERS_PER_Q) ||
1592 (q_nr > QDIO_MAX_QUEUES_PER_IRQ))
1593 return -EINVAL;
1594
1595 if (!count)
1596 return 0;
1597
1598 irq_ptr = cdev->private->qdio_data;
1599 if (!irq_ptr)
1600 return -ENODEV;
1601
1602 if (callflags & QDIO_FLAG_SYNC_INPUT)
1603 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO input");
1604 else
1605 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO output");
1606 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "q:%1d flag:%4x", q_nr, callflags);
1607 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "buf:%2d cnt:%3d", bufnr, count);
1608
1609 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
1610 return -EBUSY;
1611
1612 if (callflags & QDIO_FLAG_SYNC_INPUT)
1613 handle_inbound(irq_ptr->input_qs[q_nr], callflags, bufnr,
1614 count);
1615 else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
1616 handle_outbound(irq_ptr->output_qs[q_nr], callflags, bufnr,
1617 count);
1618 else
1619 return -EINVAL;
1620 return 0;
1621}
1622EXPORT_SYMBOL_GPL(do_QDIO);
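/*
 * Illustrative usage sketch (editor's addition): how an upper-layer driver
 * (qeth/zfcp style) would hand buffers to the device via do_QDIO(). Queue
 * and buffer numbers are arbitrary example values; the function name is
 * hypothetical.
 */
#if 0
static int example_do_qdio(struct ccw_device *cdev, int bufnr)
{
	int rc;

	/* return one processed buffer to input queue 0 */
	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, 1);
	if (rc)
		return rc;

	/* hand one filled SBAL to output queue 0, requesting a PCI */
	return do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT,
		       0, bufnr, 1);
}
#endif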
1623
1624static int __init init_QDIO(void)
1625{
1626 int rc;
1627
1628 rc = qdio_setup_init();
1629 if (rc)
1630 return rc;
1631 rc = tiqdio_allocate_memory();
1632 if (rc)
1633 goto out_cache;
1634 rc = qdio_debug_init();
1635 if (rc)
1636 goto out_ti;
1637 rc = qdio_setup_perf_stats();
1638 if (rc)
1639 goto out_debug;
1640 rc = tiqdio_register_thinints();
1641 if (rc)
1642 goto out_perf;
1643 return 0;
1644
1645out_perf:
1646 qdio_remove_perf_stats();
1647out_debug:
1648 qdio_debug_exit();
1649out_ti:
1650 tiqdio_free_memory();
1651out_cache:
1652 qdio_setup_exit();
1653 return rc;
1654}
1655
1656static void __exit exit_QDIO(void)
1657{
1658 tiqdio_unregister_thinints();
1659 tiqdio_free_memory();
1660 qdio_remove_perf_stats();
1661 qdio_debug_exit();
1662 qdio_setup_exit();
1663}
1664
1665module_init(init_QDIO);
1666module_exit(exit_QDIO);