rt2x00: Split watchdog check into a DMA and STATUS timeout
authorIvo van Doorn <ivdoorn@gmail.com>
Mon, 30 Aug 2010 19:15:19 +0000 (21:15 +0200)
committerJohn W. Linville <linville@tuxdriver.com>
Tue, 31 Aug 2010 18:22:25 +0000 (14:22 -0400)
The watchdog for rt2800usb triggers frequently, causing all URBs
to be canceled often enough to interrupt the normal TX flow.
More research indicated that it was not the URB uploads to the USB
host that were hanging, but instead the TX status reports.

To correctly detect what is going on, we introduce Q_INDEX_DMA_DONE
which is an index counter between Q_INDEX_DONE and Q_INDEX and indicates
if the frame has been transferred to the device.

This also requires the rt2x00queue timeout functions to be updated
to differentiate between a DMA timeout (time between Q_INDEX and
Q_INDEX_DMA_DONE timeout) and a STATUS timeout (time between
Q_INDEX_DMA_DONE and Q_INDEX_DONE timeout)

All Q_INDEX_DMA_DONE code was taken from the RFC from
Helmut Schaa <helmut.schaa@googlemail.com> for the implementation
for watchdog for rt2800pci.

Signed-off-by: Ivo van Doorn <IvDoorn@gmail.com>
Acked-by: Gertjan van Wingerde <gwingerde@gmail.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00debug.c
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/rt2x00/rt2x00queue.h
drivers/net/wireless/rt2x00/rt2x00usb.c

index 762f6b4e7afcb4a44ae53fc3a1f183aa8d4f5043..0ae942cb66df4d55c5ab3d056f975749a670bd70 100644 (file)
@@ -1070,6 +1070,7 @@ static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
  */
 void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev);
 void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev);
+void rt2x00lib_dmadone(struct queue_entry *entry);
 void rt2x00lib_txdone(struct queue_entry *entry,
                      struct txdone_entry_desc *txdesc);
 void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status);
index daf7f950a28f1f7d38abfbebf3728adb5e0523ff..54dc44bb415c974553fe5207f856ba3a57c97b7d 100644 (file)
@@ -338,14 +338,15 @@ static ssize_t rt2x00debug_read_queue_stats(struct file *file,
                return -ENOMEM;
 
        temp = data +
-           sprintf(data, "qid\tcount\tlimit\tlength\tindex\tdone\n");
+           sprintf(data, "qid\tcount\tlimit\tlength\tindex\tdma done\tdone\n");
 
        queue_for_each(intf->rt2x00dev, queue) {
                spin_lock_irqsave(&queue->lock, irqflags);
 
-               temp += sprintf(temp, "%d\t%d\t%d\t%d\t%d\t%d\n", queue->qid,
+               temp += sprintf(temp, "%d\t%d\t%d\t%d\t%d\t%d\t%d\n", queue->qid,
                                queue->count, queue->limit, queue->length,
                                queue->index[Q_INDEX],
+                               queue->index[Q_INDEX_DMA_DONE],
                                queue->index[Q_INDEX_DONE]);
 
                spin_unlock_irqrestore(&queue->lock, irqflags);
index 580595ba5683fff536a8e8fd50d7a2929fad8b5b..053fdd3bd7206c56836a7aa73aed12e155edd026 100644 (file)
@@ -251,6 +251,12 @@ void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev)
 }
 EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt);
 
+void rt2x00lib_dmadone(struct queue_entry *entry)
+{
+       rt2x00queue_index_inc(entry->queue, Q_INDEX_DMA_DONE);
+}
+EXPORT_SYMBOL_GPL(rt2x00lib_dmadone);
+
 void rt2x00lib_txdone(struct queue_entry *entry,
                      struct txdone_entry_desc *txdesc)
 {
index ecf57635ae5182396dc090f752dc32f72496ce66..6d41599a090c9c71fbd3fe8eff60ee28cd199d7d 100644 (file)
@@ -731,13 +731,13 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
        if (queue->index[index] >= queue->limit)
                queue->index[index] = 0;
 
+       queue->last_action[index] = jiffies;
+
        if (index == Q_INDEX) {
                queue->length++;
-               queue->last_index = jiffies;
        } else if (index == Q_INDEX_DONE) {
                queue->length--;
                queue->count++;
-               queue->last_index_done = jiffies;
        }
 
        spin_unlock_irqrestore(&queue->lock, irqflags);
@@ -746,14 +746,17 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
 static void rt2x00queue_reset(struct data_queue *queue)
 {
        unsigned long irqflags;
+       unsigned int i;
 
        spin_lock_irqsave(&queue->lock, irqflags);
 
        queue->count = 0;
        queue->length = 0;
-       queue->last_index = jiffies;
-       queue->last_index_done = jiffies;
-       memset(queue->index, 0, sizeof(queue->index));
+
+       for (i = 0; i < Q_INDEX_MAX; i++) {
+               queue->index[i] = 0;
+               queue->last_action[i] = jiffies;
+       }
 
        spin_unlock_irqrestore(&queue->lock, irqflags);
 }
index 0e38a911195d78bb5af8d6728573b74ef5a523c3..d81d85f3486611cb79b18571694727c9616d7885 100644 (file)
@@ -401,6 +401,8 @@ struct queue_entry {
  *
  * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
  *     owned by the hardware then the queue is considered to be full.
+ * @Q_INDEX_DMA_DONE: Index pointer for the next entry which will have been
+ *     transferred to the hardware.
  * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by
  *     the hardware and for which we need to run the txdone handler. If this
  *     entry is not owned by the hardware the queue is considered to be empty.
@@ -409,6 +411,7 @@ struct queue_entry {
  */
 enum queue_index {
        Q_INDEX,
+       Q_INDEX_DMA_DONE,
        Q_INDEX_DONE,
        Q_INDEX_MAX,
 };
@@ -445,13 +448,12 @@ struct data_queue {
        enum data_queue_qid qid;
 
        spinlock_t lock;
-       unsigned long last_index;
-       unsigned long last_index_done;
        unsigned int count;
        unsigned short limit;
        unsigned short threshold;
        unsigned short length;
        unsigned short index[Q_INDEX_MAX];
+       unsigned long last_action[Q_INDEX_MAX];
 
        unsigned short txop;
        unsigned short aifs;
@@ -616,12 +618,23 @@ static inline int rt2x00queue_threshold(struct data_queue *queue)
 }
 
 /**
- * rt2x00queue_timeout - Check if a timeout occured for this queue
+ * rt2x00queue_timeout - Check if a timeout occurred for STATUS reports
  * @queue: Queue to check.
  */
 static inline int rt2x00queue_timeout(struct data_queue *queue)
 {
-       return time_after(queue->last_index, queue->last_index_done + (HZ / 10));
+       return time_after(queue->last_action[Q_INDEX_DMA_DONE],
+                         queue->last_action[Q_INDEX_DONE] + (HZ / 10));
+}
+
+/**
+ * rt2x00queue_dma_timeout - Check if a timeout occurred for DMA transfers
+ * @queue: Queue to check.
+ */
+static inline int rt2x00queue_dma_timeout(struct data_queue *queue)
+{
+       return time_after(queue->last_action[Q_INDEX],
+                         queue->last_action[Q_INDEX_DMA_DONE] + (HZ / 10));
 }
 
 /**
index 6cc7aa418d8727db8f511a42b1424fd9a7c31671..aec6440d364a7168f264b49997e95725bffdf3e0 100644 (file)
@@ -212,6 +212,11 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
            !__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                return;
 
+       /*
+        * Report the frame as DMA done
+        */
+       rt2x00lib_dmadone(entry);
+
        /*
         * Check if the frame was correctly uploaded
         */
@@ -283,13 +288,14 @@ void rt2x00usb_kill_tx_queue(struct data_queue *queue)
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_kill_tx_queue);
 
-static void rt2x00usb_watchdog_reset_tx(struct data_queue *queue)
+static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
 {
        struct queue_entry *entry;
        struct queue_entry_priv_usb *entry_priv;
        unsigned short threshold = queue->threshold;
 
-       WARNING(queue->rt2x00dev, "TX queue %d timed out, invoke reset", queue->qid);
+       WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
+               " invoke forced forced reset", queue->qid);
 
        /*
         * Temporarily disable the TX queue, this will force mac80211
@@ -331,13 +337,23 @@ static void rt2x00usb_watchdog_reset_tx(struct data_queue *queue)
        ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
 }
 
+static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
+{
+       WARNING(queue->rt2x00dev, "TX queue %d status timed out,"
+               " invoke forced tx handler", queue->qid);
+
+       ieee80211_queue_work(queue->rt2x00dev->hw, &queue->rt2x00dev->txdone_work);
+}
+
 void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
 {
        struct data_queue *queue;
 
        tx_queue_for_each(rt2x00dev, queue) {
+               if (rt2x00queue_dma_timeout(queue))
+                       rt2x00usb_watchdog_tx_dma(queue);
                if (rt2x00queue_timeout(queue))
-                       rt2x00usb_watchdog_reset_tx(queue);
+                       rt2x00usb_watchdog_tx_status(queue);
        }
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);
@@ -382,6 +398,11 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
            !__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                return;
 
+       /*
+        * Report the frame as DMA done
+        */
+       rt2x00lib_dmadone(entry);
+
        /*
         * Check if the received data is simply too small
         * to be actually valid, or if the urb is signaling