/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
#include "qdio_perf.h"
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
static inline int do_siga_sync(struct subchannel_id schid,
			       unsigned int out_mask, unsigned int in_mask)
{
	register unsigned long __fc asm ("0") = 2;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}
static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
{
	register unsigned long __fc asm ("0") = 1;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}
/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDIO unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 u32 *bb, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}
static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	char dbf_text[15];

	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* not all buffers processed */
	if (ccq == 96 || ccq == 97)
		return 1;
	/* notify devices immediately */
	sprintf(dbf_text, "%d", ccq);
	QDIO_DBF_TEXT2(1, trace, dbf_text);
	return -EIO;
}
/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;
	char dbf_text[15];

	BUG_ON(!q->irq_ptr->sch_token);
	qdio_perf_stat_inc(&perf_stats.debug_eqbs_all);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count)) {
		qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete);
		return (count - tmp_count);
	}
	if (rc == 1) {
		QDIO_DBF_TEXT5(1, trace, "eqAGAIN");
		goto again;
	}
	if (rc < 0) {
		QDIO_DBF_TEXT2(1, trace, "eqberr");
		sprintf(dbf_text, "%2x,%2x,%d,%d", count, tmp_count, ccq, nr);
		QDIO_DBF_TEXT2(1, trace, dbf_text);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}
/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;
	char dbf_text[15];

	BUG_ON(!q->irq_ptr->sch_token);
	qdio_perf_stat_inc(&perf_stats.debug_sqbs_all);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		QDIO_DBF_TEXT5(1, trace, "sqAGAIN");
		qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete);
		goto again;
	}
	if (rc < 0) {
		QDIO_DBF_TEXT3(1, trace, "sqberr");
		sprintf(dbf_text, "%2x,%2x", count, tmp_count);
		QDIO_DBF_TEXT3(1, trace, dbf_text);
		sprintf(dbf_text, "%d,%d", ccq, nr);
		QDIO_DBF_TEXT3(1, trace, dbf_text);

		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	WARN_ON(tmp_count);
	return count - tmp_count;
}
/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		if (!__state)
			__state = q->slsb.val[bufnr];
		else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}
inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
			 unsigned char *state)
{
	return get_buf_states(q, bufnr, state, 1);
}
/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}
static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}
/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}
static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
			  unsigned int input)
{
	int cc;

	if (!need_siga_sync(q))
		return 0;

	qdio_perf_stat_inc(&perf_stats.siga_sync);

	cc = do_siga_sync(q->irq_ptr->schid, output, input);
	if (cc) {
		QDIO_DBF_TEXT4(0, trace, "sigasync");
		QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
		QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
	}
	return cc;
}
inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static inline int qdio_siga_sync_out(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, 0);
}

static inline int qdio_siga_sync_all(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, ~0U);
}
static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
	unsigned int fc = 0;
	unsigned long schid;

	if (q->u.out.use_enh_siga) {
		fc = 3;
	}
	if (!is_qebsm(q))
		schid = *((u32 *)&q->irq_ptr->schid);
	else {
		schid = q->irq_ptr->sch_token;
		fc |= 0x80;
	}
	return do_siga_output(schid, q->mask, busy_bit, fc);
}
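
/*
 * Worked example (added for illustration, not in the original source):
 * with QEBSM and enhanced SIGA both active, the function-code bits above
 * combine to fc = 3 | 0x80 = 0x83 and the subchannel token is passed
 * instead of the subchannel id; without either feature it is plain
 * fc = 0 with the 32-bit subchannel id.
 */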
static int qdio_siga_output(struct qdio_q *q)
{
	unsigned int busy_bit;
	int cc;
	u64 start_time = 0;
	char dbf_text[15];

	QDIO_DBF_TEXT5(0, trace, "sigaout");
	QDIO_DBF_HEX5(0, trace, &q, sizeof(void *));

	qdio_perf_stat_inc(&perf_stats.siga_out);
again:
	cc = qdio_do_siga_output(q, &busy_bit);
	if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) {
		sprintf(dbf_text, "bb%4x%2x", q->irq_ptr->schid.sch_no, q->nr);
		QDIO_DBF_TEXT3(0, trace, dbf_text);

		if (!start_time)
			start_time = get_usecs();
		else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}

	if (cc == 2 && busy_bit)
		cc |= QDIO_ERROR_SIGA_BUSY;
	if (cc)
		QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
	return cc;
}
static inline int qdio_siga_input(struct qdio_q *q)
{
	int cc;

	QDIO_DBF_TEXT4(0, trace, "sigain");
	QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));

	qdio_perf_stat_inc(&perf_stats.siga_in);

	cc = do_siga_input(q->irq_ptr->schid, q->mask);
	if (cc)
		QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
	return cc;
}
/* called from thinint inbound handler */
void qdio_sync_after_thinint(struct qdio_q *q)
{
	if (pci_out_supported(q)) {
		if (need_siga_sync_thinint(q))
			qdio_siga_sync_all(q);
		else if (need_siga_sync_out_thinint(q))
			qdio_siga_sync_out(q);
	} else
		qdio_siga_sync_q(q);
}
inline void qdio_stop_polling(struct qdio_q *q)
{
	spin_lock_bh(&q->u.in.lock);
	if (!q->u.in.polling) {
		spin_unlock_bh(&q->u.in.lock);
		return;
	}
	q->u.in.polling = 0;
	qdio_perf_stat_inc(&perf_stats.debug_stop_polling);

	/* show the card that we are not polling anymore */
	set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
	spin_unlock_bh(&q->u.in.lock);
}
static void announce_buffer_error(struct qdio_q *q)
{
	char dbf_text[15];

	if (q->is_input_q)
		QDIO_DBF_TEXT3(1, trace, "inperr");
	else
		QDIO_DBF_TEXT3(0, trace, "outperr");

	sprintf(dbf_text, "%x-%x-%x", q->first_to_check,
		q->sbal[q->first_to_check]->element[14].flags,
		q->sbal[q->first_to_check]->element[15].flags);
	QDIO_DBF_TEXT3(1, trace, dbf_text);
	QDIO_DBF_HEX2(1, trace, q->sbal[q->first_to_check], 256);

	q->qdio_error = QDIO_ERROR_SLSB_STATE;
}
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	/*
	 * If we still poll don't update last_move_ftc, keep the
	 * previously ACKed buffer there.
	 */
	if (!q->u.in.polling)
		q->last_move_ftc = q->first_to_check;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	/*
	 * No siga-sync here, as a PCI or thin interrupt will sync the
	 * queues afterwards.
	 */

	/* need to set count to 1 for non-qebsm */
	if (!is_qebsm(q))
		count = 1;

check_next:
	if (q->first_to_check == stop)
		goto out;

	count = get_buf_states(q, q->first_to_check, &state, count);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		QDIO_DBF_TEXT5(0, trace, "inptprim");

		/*
		 * Only ACK the first buffer. The ACK will be removed in
		 * qdio_stop_polling.
		 */
		if (q->u.in.polling)
			state = SLSB_P_INPUT_NOT_INIT;
		else {
			q->u.in.polling = 1;
			state = SLSB_P_INPUT_ACK;
		}
		set_buf_state(q, q->first_to_check, state);

		/*
		 * Need to change all PRIMED buffers to NOT_INIT, otherwise
		 * we're losing initiative in the thinint code.
		 */
		if (count > 1)
			set_buf_states(q, next_buf(q->first_to_check),
				       SLSB_P_INPUT_NOT_INIT, count - 1);

		/*
		 * No siga-sync needed for non-qebsm here, as the inbound queue
		 * will be synced on the next siga-r, resp.
		 * tiqdio_is_inbound_q_done will do the siga-sync.
		 */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		goto check_next;
	case SLSB_P_INPUT_ERROR:
		announce_buffer_error(q);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		QDIO_DBF_TEXT5(0, trace, "inpnipro");
		break;
	default:
		BUG();
	}
out:
	QDIO_DBF_HEX4(0, trace, &q->first_to_check, sizeof(int));
	return q->first_to_check;
}
int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if ((bufnr != q->last_move_ftc) || q->qdio_error) {
		if (!need_siga_sync(q) && !pci_out_supported(q))
			q->u.in.timestamp = get_usecs();

		QDIO_DBF_TEXT4(0, trace, "inhasmvd");
		QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
		return 1;
	} else
		return 0;
}
static int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	/*
	 * We need that one for synchronization with the adapter, as it
	 * does a kind of PCI avoidance.
	 */
	qdio_siga_sync_q(q);

	get_buf_state(q, q->first_to_check, &state);
	if (state == SLSB_P_INPUT_PRIMED)
		/* we got something to do */
		return 0;

	/* on VM, we don't poll, so the q is always done here */
	if (need_siga_sync(q) || pci_out_supported(q))
		return 1;

	/*
	 * At this point we know that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
#ifdef CONFIG_QDIO_DEBUG
		QDIO_DBF_TEXT4(0, trace, "inqisdon");
		QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
		sprintf(dbf_text, "pf%02x", q->first_to_check);
		QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		return 1;
	} else {
#ifdef CONFIG_QDIO_DEBUG
		QDIO_DBF_TEXT4(0, trace, "inqisntd");
		QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
		sprintf(dbf_text, "pf%02x", q->first_to_check);
		QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		return 0;
	}
}
void qdio_kick_inbound_handler(struct qdio_q *q)
{
	int count, start, end;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	qdio_perf_stat_inc(&perf_stats.inbound_handler);

	start = q->first_to_kick;
	end = q->first_to_check;
	if (end >= start)
		count = end - start;
	else
		count = end + QDIO_MAX_BUFFERS_PER_Q - start;

#ifdef CONFIG_QDIO_DEBUG
	sprintf(dbf_text, "s=%2xc=%2x", start, count);
	QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr,
		   start, count, q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = q->first_to_check;
	q->qdio_error = 0;
}
static void __qdio_inbound_processing(struct qdio_q *q)
{
	qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
again:
	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_inbound_handler(q);

	if (!qdio_inbound_q_done(q))
		/* means poll time is not yet over */
		goto again;

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q))
		goto again;
}
/* inbound tasklet */
void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
	    (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
		qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	/* need to set count to 1 for non-qebsm */
	if (!is_qebsm(q))
		count = 1;

check_next:
	if (q->first_to_check == stop)
		return q->first_to_check;

	count = get_buf_states(q, q->first_to_check, &state, count);
	if (!count)
		return q->first_to_check;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		QDIO_DBF_TEXT5(0, trace, "outpempt");

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		/*
		 * We fetch all buffer states at once. get_buf_states may
		 * return count < stop. For QEBSM we do not loop.
		 */
		if (is_qebsm(q))
			break;
		goto check_next;
	case SLSB_P_OUTPUT_ERROR:
		announce_buffer_error(q);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		QDIO_DBF_TEXT5(0, trace, "outpprim");
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}
	return q->first_to_check;
}
/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if ((bufnr != q->last_move_ftc) || q->qdio_error) {
		q->last_move_ftc = bufnr;
		QDIO_DBF_TEXT4(0, trace, "oqhasmvd");
		QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
		return 1;
	} else
		return 0;
}
/*
 * VM could present us with cc=2 and the busy bit set on SIGA-write
 * during reconfiguration of its Guest LAN (only in iqdio mode;
 * otherwise qdio is asynchronous and cc=2 with busy bit there will take
 * the queues down immediately).
 *
 * Therefore qdio_siga_output will retry constantly for a short time
 * if such a condition occurs. If it doesn't change, it will increase
 * the busy_siga_counter, save the timestamp, and schedule the queue for
 * later processing. qdio_outbound_processing will check the counter:
 * if non-zero, it will call qdio_kick_outbound_q as often as the value
 * of the counter, attempting further SIGA instructions. For each
 * successful SIGA the counter is decreased; for failing SIGAs the
 * counter remains the same. After some time of no movement,
 * qdio_kick_outbound_q will finally fail and reflect corresponding
 * error codes to call the upper layer module and have it take the
 * queues down.
 *
 * Note that this is a change from the original HiperSockets design
 * (saying cc=2 and busy bit means take the queues down), but in those
 * days Guest LAN didn't exist... Excessive cc=2 with busy bit
 * conditions will still take the queues down, but the threshold is
 * higher due to the Guest LAN environment.
 *
 * Called from outbound tasklet and do_QDIO handler.
 */
static void qdio_kick_outbound_q(struct qdio_q *q)
{
	int rc;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];

	QDIO_DBF_TEXT5(0, trace, "kickoutq");
	QDIO_DBF_HEX5(0, trace, &q, sizeof(void *));
#endif /* CONFIG_QDIO_DEBUG */

	if (!need_siga_out(q))
		return;

	rc = qdio_siga_output(q);
	switch (rc) {
	case 0:
		/* TODO: improve error handling for CC=0 case */
#ifdef CONFIG_QDIO_DEBUG
		if (q->u.out.timestamp) {
			QDIO_DBF_TEXT3(0, trace, "cc2reslv");
			sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no,
				q->nr,
				atomic_read(&q->u.out.busy_siga_counter));
			QDIO_DBF_TEXT3(0, trace, dbf_text);
		}
#endif /* CONFIG_QDIO_DEBUG */
		/* went smooth this time, reset timestamp */
		q->u.out.timestamp = 0;
		break;
	/* cc=2 and busy bit */
	case (2 | QDIO_ERROR_SIGA_BUSY):
		atomic_inc(&q->u.out.busy_siga_counter);

		/* if the last siga was successful, save timestamp here */
		if (!q->u.out.timestamp)
			q->u.out.timestamp = get_usecs();

		/* if we're in time, don't touch qdio_error */
		if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP) {
			tasklet_schedule(&q->tasklet);
			break;
		}
		QDIO_DBF_TEXT2(0, trace, "cc2REPRT");
#ifdef CONFIG_QDIO_DEBUG
		sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr,
			atomic_read(&q->u.out.busy_siga_counter));
		QDIO_DBF_TEXT3(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
	/* for plain cc=1, 2 or 3 */
	default:
		q->qdio_error = rc;
	}
}
static void qdio_kick_outbound_handler(struct qdio_q *q)
{
	int start, end, count;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	start = q->first_to_kick;
	end = q->last_move_ftc;
	if (end >= start)
		count = end - start;
	else
		count = end + QDIO_MAX_BUFFERS_PER_Q - start;

#ifdef CONFIG_QDIO_DEBUG
	QDIO_DBF_TEXT4(0, trace, "kickouth");
	QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));

	sprintf(dbf_text, "s=%2xc=%2x", start, count);
	QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time: */
	q->first_to_kick = q->last_move_ftc;
	q->qdio_error = 0;
}
static void __qdio_outbound_processing(struct qdio_q *q)
{
	int siga_attempts;

	qdio_perf_stat_inc(&perf_stats.tasklet_outbound);

	/* see comment in qdio_kick_outbound_q */
	siga_attempts = atomic_read(&q->u.out.busy_siga_counter);
	while (siga_attempts--) {
		atomic_dec(&q->u.out.busy_siga_counter);
		qdio_kick_outbound_q(q);
	}

	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_outbound_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT) {
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			tasklet_schedule(&q->tasklet);
		return;
	}

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL) {
		tasklet_schedule(&q->tasklet);
		return;
	}

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else {
		if (!timer_pending(&q->u.out.timer)) {
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
			qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
		}
	}
}
/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	tasklet_schedule(&q->tasklet);
}
/* called from thinint inbound tasklet */
void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];

	QDIO_DBF_TEXT5(0, trace, "newstate");
	sprintf(dbf_text, "%4x%4x", irq_ptr->schid.sch_no, state);
	QDIO_DBF_TEXT5(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	irq_ptr->state = state;
	mb();
}
static void qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
{
	char dbf_text[15];

	if (irb->esw.esw0.erw.cons) {
		sprintf(dbf_text, "sens%4x", schid.sch_no);
		QDIO_DBF_TEXT2(1, trace, dbf_text);
		QDIO_DBF_HEX0(0, trace, irb, 64);
		QDIO_DBF_HEX0(0, trace, irb->ecw, 64);
	}
}
/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	qdio_perf_stat_inc(&perf_stats.pci_int);

	for_each_input_queue(irq_ptr, q, i)
		tasklet_schedule(&q->tasklet);

	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;

		if (!siga_syncs_out_pci(q))
			qdio_siga_sync_q(q);

		tasklet_schedule(&q->tasklet);
	}
}
static void qdio_handle_activate_check(struct ccw_device *cdev,
				       unsigned long intparm, int cstat,
				       int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	char dbf_text[15];

	QDIO_DBF_TEXT2(1, trace, "ick2");
	sprintf(dbf_text, "%s", dev_name(&cdev->dev));
	QDIO_DBF_TEXT2(1, trace, dbf_text);
	QDIO_DBF_HEX2(0, trace, &intparm, sizeof(int));
	QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int));
	QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int));

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}
static void qdio_call_shutdown(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
	put_device(&cdev->dev);
}
static void qdio_int_error(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
		if (get_device(&cdev->dev)) {
			/* Can't call shutdown from interrupt context. */
			PREPARE_WORK(&cdev->private->kick_work,
				     qdio_call_shutdown);
			queue_work(ccw_device_work, &cdev->private->kick_work);
		}
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat,
				       int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
		QDIO_DBF_TEXT2(1, setup, "eq:ckcon");
		goto error;
	}

	if (!(dstat & DEV_STAT_DEV_END)) {
		QDIO_DBF_TEXT2(1, setup, "eq:no de");
		goto error;
	}

	if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) {
		QDIO_DBF_TEXT2(1, setup, "eq:badio");
		goto error;
	}
	return 0;
error:
	QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int));
	QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int));
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
	return 1;
}
static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	char dbf_text[15];

	sprintf(dbf_text, "qehi%4x", cdev->private->schid.sch_no);
	QDIO_DBF_TEXT0(0, setup, dbf_text);
	QDIO_DBF_TEXT0(0, trace, dbf_text);

	if (!qdio_establish_check_errors(cdev, cstat, dstat))
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
}
/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;
	char dbf_text[15];

	qdio_perf_stat_inc(&perf_stats.qdio_int);

	if (!intparm || !irq_ptr) {
		sprintf(dbf_text, "qihd%4x", cdev->private->schid.sch_no);
		QDIO_DBF_TEXT2(1, setup, dbf_text);
		return;
	}

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			sprintf(dbf_text, "ierr%4x", irq_ptr->schid.sch_no);
			QDIO_DBF_TEXT2(1, setup, dbf_text);
			return;
		case -ETIMEDOUT:
			sprintf(dbf_text, "qtoh%4x", irq_ptr->schid.sch_no);
			QDIO_DBF_TEXT2(1, setup, dbf_text);
			qdio_int_error(cdev);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr->schid, irb);

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;

	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;

	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			/* no state change so no need to wake up wait_q */
			return;
		}
		if ((cstat & ~SCHN_STAT_PCI) || dstat) {
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
			break;
		}
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	char dbf_text[15];

	if (!cdev || !cdev->private)
		return -EINVAL;

	sprintf(dbf_text, "qssq%4x", cdev->private->schid.sch_no);
	QDIO_DBF_TEXT0(0, setup, dbf_text);

	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
/**
 * qdio_cleanup - shutdown queues and free data structures
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 *
 * This function calls qdio_shutdown() for @cdev with method @how
 * and on success qdio_free() for @cdev.
 */
int qdio_cleanup(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr;
	char dbf_text[15];
	int rc;

	sprintf(dbf_text, "qcln%4x", cdev->private->schid.sch_no);
	QDIO_DBF_TEXT0(0, setup, dbf_text);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	rc = qdio_shutdown(cdev, how);
	if (rc == 0)
		rc = qdio_free(cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_cleanup);
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_disable(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		tasklet_disable(&q->tasklet);
		del_timer(&q->u.out.timer);
	}
}
/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long flags;
	char dbf_text[15];

	sprintf(dbf_text, "qshu%4x", cdev->private->schid.sch_no);
	QDIO_DBF_TEXT0(0, setup, dbf_text);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		sprintf(dbf_text, "sher%4x", irq_ptr->schid.sch_no);
		QDIO_DBF_TEXT0(0, setup, dbf_text);
		sprintf(dbf_text, "rc=%d", rc);
		QDIO_DBF_TEXT0(0, setup, dbf_text);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);
/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	char dbf_text[15];

	sprintf(dbf_text, "qfre%4x", cdev->private->schid.sch_no);
	QDIO_DBF_TEXT0(0, setup, dbf_text);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	mutex_lock(&irq_ptr->setup_mutex);
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);
/**
 * qdio_initialize - allocate and establish queues for a qdio subchannel
 * @init_data: initialization data
 *
 * This function first allocates queues via qdio_allocate() and on success
 * establishes them via qdio_establish().
 */
int qdio_initialize(struct qdio_initialize *init_data)
{
	int rc;
	char dbf_text[15];

	sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no);
	QDIO_DBF_TEXT0(0, setup, dbf_text);

	rc = qdio_allocate(init_data);
	if (rc)
		return rc;

	rc = qdio_establish(init_data);
	if (rc)
		qdio_free(init_data->cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_initialize);
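
/*
 * A minimal usage sketch (hypothetical, added for illustration; not part
 * of the original file): an upper layer such as qeth or zfcp fills a
 * struct qdio_initialize and calls qdio_initialize(). The handler names,
 * queue counts and 'card' pointer below are made up.
 *
 *	static struct qdio_initialize init_data = {
 *		.cdev			= cdev,
 *		.q_format		= QDIO_QETH_QFMT,
 *		.no_input_qs		= 1,
 *		.no_output_qs		= 1,
 *		.input_handler		= my_input_handler,
 *		.output_handler		= my_output_handler,
 *		.int_parm		= (unsigned long)card,
 *		.input_sbal_addr_array	= in_sbals,
 *		.output_sbal_addr_array	= out_sbals,
 *	};
 *	rc = qdio_initialize(&init_data);
 */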
/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	char dbf_text[15];

	sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no);
	QDIO_DBF_TEXT0(0, setup, dbf_text);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	qdio_allocate_do_dbf(init_data);

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	QDIO_DBF_TEXT0(0, setup, "irq_ptr:");
	QDIO_DBF_HEX0(0, setup, &irq_ptr, sizeof(void *));

	mutex_init(&irq_ptr->setup_mutex);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	QDIO_DBF_TEXT0(0, setup, "qdr:");
	QDIO_DBF_HEX0(0, setup, &irq_ptr->qdr, sizeof(void *));

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	char dbf_text[20];
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no);
	QDIO_DBF_TEXT0(0, setup, dbf_text);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		sprintf(dbf_text, "eq:io%4x", irq_ptr->schid.sch_no);
		QDIO_DBF_TEXT2(1, setup, dbf_text);
		sprintf(dbf_text, "eq:rc%4x", rc);
		QDIO_DBF_TEXT2(1, setup, dbf_text);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	sprintf(dbf_text, "qDmmwc%2x", irq_ptr->ssqd_desc.mmwc);
	QDIO_DBF_TEXT2(0, setup, dbf_text);
	sprintf(dbf_text, "qib ac%2x", irq_ptr->qib.ac);
	QDIO_DBF_TEXT2(0, setup, dbf_text);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;
	char dbf_text[20];

	sprintf(dbf_text, "qact%4x", cdev->private->schid.sch_no);
	QDIO_DBF_TEXT0(0, setup, dbf_text);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		sprintf(dbf_text, "aq:io%4x", irq_ptr->schid.sch_no);
		QDIO_DBF_TEXT2(1, setup, dbf_text);
		sprintf(dbf_text, "aq:rc%4x", rc);
		QDIO_DBF_TEXT2(1, setup, dbf_text);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}
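
/*
 * Worked example (added for illustration): with QDIO_MAX_BUFFERS_PER_Q
 * == 128, start = 120 and count = 16 give end = add_buf(120, 16) = 8,
 * i.e. the wrap-around case. Buffers 120..127 and 0..7 are "in between",
 * so buf_in_between(125, 120, 16) and buf_in_between(3, 120, 16) both
 * return 1, while buf_in_between(50, 120, 16) returns 0.
 */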
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static void handle_inbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned long flags;
	int used, rc;

	/*
	 * do_QDIO could run in parallel with the queue tasklet so the
	 * upper-layer program could empty the ACKed buffer here.
	 * If that happens we must clear the polling flag, otherwise
	 * qdio_stop_polling() could set the buffer to NOT_INIT after
	 * it was set to EMPTY which would kill us.
	 */
	spin_lock_irqsave(&q->u.in.lock, flags);
	if (q->u.in.polling)
		if (buf_in_between(q->last_move_ftc, bufnr, count))
			q->u.in.polling = 0;

	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
	spin_unlock_irqrestore(&q->u.in.lock, flags);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return;

	if (need_siga_in(q)) {
		rc = qdio_siga_input(q);
		if (rc)
			q->qdio_error = rc;
	}
}
/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static void handle_outbound(struct qdio_q *q, unsigned int callflags,
			    int bufnr, int count)
{
	unsigned char state;
	int used;

	qdio_perf_stat_inc(&perf_stats.outbound_handler);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (callflags & QDIO_FLAG_PCI_OUT)
		q->u.out.pci_out_enabled = 1;
	else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		if (multicast_outbound(q))
			qdio_kick_outbound_q(q);
		else
			if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
			    (count > 1) &&
			    (count <= q->irq_ptr->ssqd_desc.mmwc)) {
				/* exploit enhanced SIGA */
				q->u.out.use_enh_siga = 1;
				qdio_kick_outbound_q(q);
			} else {
				/*
				 * One siga-w per buffer required for unicast
				 * HiperSockets.
				 */
				q->u.out.use_enh_siga = 0;
				while (count--)
					qdio_kick_outbound_q(q);
			}
		goto out;
	}

	if (need_siga_sync(q)) {
		qdio_siga_sync_q(q);
		goto out;
	}

	/* try to fast requeue buffers */
	get_buf_state(q, prev_buf(bufnr), &state);
	if (state != SLSB_CU_OUTPUT_PRIMED)
		qdio_kick_outbound_q(q);
	else {
		QDIO_DBF_TEXT5(0, trace, "fast-req");
		qdio_perf_stat_inc(&perf_stats.fast_requeue);
	}
out:
	/* Fixme: could wait forever if called from process context */
	tasklet_schedule(&q->tasklet);
}
/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, int bufnr, int count)
{
	struct qdio_irq *irq_ptr;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[20];

	sprintf(dbf_text, "doQD%4x", cdev->private->schid.sch_no);
	QDIO_DBF_TEXT3(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
	    (count > QDIO_MAX_BUFFERS_PER_Q) ||
	    (q_nr > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if (!count)
		return 0;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

#ifdef CONFIG_QDIO_DEBUG
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		QDIO_DBF_HEX3(0, trace, &irq_ptr->input_qs[q_nr],
			      sizeof(void *));
	else
		QDIO_DBF_HEX3(0, trace, &irq_ptr->output_qs[q_nr],
			      sizeof(void *));

	sprintf(dbf_text, "flag%04x", callflags);
	QDIO_DBF_TEXT3(0, trace, dbf_text);
	sprintf(dbf_text, "qi%02xct%02x", bufnr, count);
	QDIO_DBF_TEXT3(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;

	if (callflags & QDIO_FLAG_SYNC_INPUT)
		handle_inbound(irq_ptr->input_qs[q_nr],
			       callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		handle_outbound(irq_ptr->output_qs[q_nr],
				callflags, bufnr, count);
	else {
		QDIO_DBF_TEXT3(1, trace, "doQD:inv");
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(do_QDIO);
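
/*
 * A hedged usage sketch (hypothetical, not from the original file): after
 * qdio_activate(), an upper-layer driver hands buffers to qdio via
 * do_QDIO(). The queue and buffer numbers below are made up.
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, 16);
 *		(returns 16 emptied input buffers, starting at buffer 0,
 *		 to the adapter via handle_inbound)
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT, 0, bufnr, 1);
 *		(hands one filled output buffer to the adapter via
 *		 handle_outbound)
 */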
static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_setup_init();
	if (rc)
		return rc;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = qdio_debug_init();
	if (rc)
		goto out_ti;
	rc = qdio_setup_perf_stats();
	if (rc)
		goto out_debug;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_perf;
	return 0;

out_perf:
	qdio_remove_perf_stats();
out_debug:
	qdio_debug_exit();
out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_remove_perf_stats();
	qdio_debug_exit();
	qdio_setup_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);