/*
 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <net/mac80211.h>
#include <linux/sched.h>
#include "queue.h"
#include "cw1200.h"
#include "debug.h"
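
/*
 * Implementation overview:
 *
 * Each queue owns a fixed pool of cw1200_queue_item slots threaded
 * onto one of three lists: free_pool (unused slots), queue (frames
 * waiting to be transmitted) and pending (frames handed to the
 * firmware, awaiting a TX confirmation).  State changes are plain
 * list_move() calls, and a packet ID resolves to a slot by indexing
 * pool[] directly, which is what makes the queue O(1).  Per-link
 * frame counts are cached twice, per queue (queue->link_map_cache)
 * and globally (stats->link_map_cache), so occupancy queries never
 * walk a list.
 */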
/* private */ struct cw1200_queue_item
{
	struct list_head	head;
	struct sk_buff		*skb;
	u32			packet_id;
	unsigned long		queue_timestamp;
	unsigned long		xmit_timestamp;
	struct cw1200_txpriv	txpriv;
	u8			generation;
};
static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	if (queue->tx_locked_cnt++ == 0) {
		pr_debug("[TX] Queue %d is locked.\n",
			 queue->queue_id);
		ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
	}
}
static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	BUG_ON(!queue->tx_locked_cnt);
	if (--queue->tx_locked_cnt == 0) {
		pr_debug("[TX] Queue %d is unlocked.\n",
			 queue->queue_id);
		ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
	}
}
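
/*
 * tx_locked_cnt makes the two helpers above nesting-safe: the queue
 * may be locked independently by several paths (the overfull handling
 * in cw1200_queue_put() and the exported cw1200_queue_lock() below).
 * mac80211 is stopped on the first lock and woken again only when the
 * last holder unlocks.
 */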
static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation,
					 u8 *queue_id, u8 *item_generation,
					 u8 *item_id)
{
	*item_id		= (packet_id >>  0) & 0xFF;
	*item_generation	= (packet_id >>  8) & 0xFF;
	*queue_id		= (packet_id >> 16) & 0xFF;
	*queue_generation	= (packet_id >> 24) & 0xFF;
}
static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id,
					    u8 item_generation, u8 item_id)
{
	return ((u32)item_id << 0) |
	       ((u32)item_generation << 8) |
	       ((u32)queue_id << 16) |
	       ((u32)queue_generation << 24);
}
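
/*
 * Worked example of the packet ID layout: queue_generation 0x01,
 * queue_id 0x02, item_generation 0x03 and item_id 0x2a pack into
 *
 *	0x2a | (0x03 << 8) | (0x02 << 16) | (0x01 << 24) = 0x0102032a
 *
 * and cw1200_queue_parse_id() recovers the four fields from the same
 * byte lanes.  The generation counters let requeue/remove detect and
 * reject IDs that belong to a previous life of the slot or queue.
 */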
static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats,
				 struct list_head *gc_list)
{
	struct cw1200_queue_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, gc_list, head) {
		list_del(&item->head);
		stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
		kfree(item);
	}
}
static void cw1200_queue_register_post_gc(struct list_head *gc_list,
					  struct cw1200_queue_item *item)
{
	struct cw1200_queue_item *gc_item;
	gc_item = kmalloc(sizeof(struct cw1200_queue_item),
			  GFP_ATOMIC);
	BUG_ON(!gc_item);
	memcpy(gc_item, item, sizeof(struct cw1200_queue_item));
	list_add_tail(&gc_item->head, gc_list);
}
static void __cw1200_queue_gc(struct cw1200_queue *queue,
			      struct list_head *head,
			      bool unlock)
{
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item = NULL, *tmp;
	bool wakeup_stats = false;

	list_for_each_entry_safe(item, tmp, &queue->queue, head) {
		if (jiffies - item->queue_timestamp < queue->ttl)
			break;
		--queue->num_queued;
		--queue->link_map_cache[item->txpriv.link_id];
		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
		cw1200_debug_tx_ttl(stats->priv);
		cw1200_queue_register_post_gc(head, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}

	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);

	if (queue->overfull) {
		if (queue->num_queued <= (queue->capacity >> 1)) {
			queue->overfull = false;
			if (unlock)
				__cw1200_queue_unlock(queue);
		} else if (item) {
			unsigned long tmo = item->queue_timestamp + queue->ttl;
			mod_timer(&queue->gc, tmo);
			cw1200_pm_stay_awake(&stats->priv->pm_state,
					     tmo - jiffies);
		}
	}
}
static void cw1200_queue_gc(unsigned long arg)
{
	LIST_HEAD(list);
	struct cw1200_queue *queue =
		(struct cw1200_queue *)arg;

	spin_lock_bh(&queue->lock);
	__cw1200_queue_gc(queue, &list, true);
	spin_unlock_bh(&queue->lock);
	cw1200_queue_post_gc(queue->stats, &list);
}
int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
			    size_t map_capacity,
			    cw1200_queue_skb_dtor_t skb_dtor,
			    struct cw1200_common *priv)
{
	memset(stats, 0, sizeof(*stats));
	stats->map_capacity = map_capacity;
	stats->skb_dtor = skb_dtor;
	stats->priv = priv;
	spin_lock_init(&stats->lock);
	init_waitqueue_head(&stats->wait_link_id_empty);

	stats->link_map_cache = kzalloc(sizeof(int) * map_capacity,
					GFP_KERNEL);
	if (!stats->link_map_cache)
		return -ENOMEM;

	return 0;
}
int cw1200_queue_init(struct cw1200_queue *queue,
		      struct cw1200_queue_stats *stats,
		      u8 queue_id,
		      size_t capacity,
		      unsigned long ttl)
{
	size_t i;

	memset(queue, 0, sizeof(*queue));
	queue->stats = stats;
	queue->capacity = capacity;
	queue->queue_id = queue_id;
	queue->ttl = ttl;
	INIT_LIST_HEAD(&queue->queue);
	INIT_LIST_HEAD(&queue->pending);
	INIT_LIST_HEAD(&queue->free_pool);
	spin_lock_init(&queue->lock);
	setup_timer(&queue->gc, cw1200_queue_gc, (unsigned long)queue);

	queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity,
			      GFP_KERNEL);
	if (!queue->pool)
		return -ENOMEM;

	queue->link_map_cache = kzalloc(sizeof(int) * stats->map_capacity,
					GFP_KERNEL);
	if (!queue->link_map_cache) {
		kfree(queue->pool);
		queue->pool = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < capacity; ++i)
		list_add_tail(&queue->pool[i].head, &queue->free_pool);

	return 0;
}
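
/*
 * A minimal setup sketch, assuming the caller embeds the queues and
 * stats in its device context (the names priv->tx_queue[] and
 * priv->tx_queue_stats below are illustrative, not defined here):
 *
 *	if (cw1200_queue_stats_init(&priv->tx_queue_stats, map_capacity,
 *				    skb_dtor, priv))
 *		goto err;
 *	for (i = 0; i < num_tx_queues; ++i)
 *		if (cw1200_queue_init(&priv->tx_queue[i],
 *				      &priv->tx_queue_stats, i,
 *				      capacity, ttl_jiffies))
 *			goto err;
 *
 * queue_id doubles as the mac80211 queue number passed to
 * ieee80211_stop_queue()/ieee80211_wake_queue() above, so one
 * cw1200_queue per TX AC is the expected arrangement.
 */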
int cw1200_queue_clear(struct cw1200_queue *queue)
{
	int i;
	LIST_HEAD(gc_list);
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item, *tmp;

	spin_lock_bh(&queue->lock);
	queue->generation++;
	list_splice_tail_init(&queue->queue, &queue->pending);
	list_for_each_entry_safe(item, tmp, &queue->pending, head) {
		WARN_ON(!item->skb);
		cw1200_queue_register_post_gc(&gc_list, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}
	queue->num_queued = 0;
	queue->num_pending = 0;

	spin_lock_bh(&stats->lock);
	for (i = 0; i < stats->map_capacity; ++i) {
		stats->num_queued -= queue->link_map_cache[i];
		stats->link_map_cache[i] -= queue->link_map_cache[i];
		queue->link_map_cache[i] = 0;
	}
	spin_unlock_bh(&stats->lock);
	if (queue->overfull) {
		queue->overfull = false;
		__cw1200_queue_unlock(queue);
	}
	spin_unlock_bh(&queue->lock);
	wake_up(&stats->wait_link_id_empty);
	cw1200_queue_post_gc(stats, &gc_list);
	return 0;
}
void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats)
{
	kfree(stats->link_map_cache);
	stats->link_map_cache = NULL;
}
void cw1200_queue_deinit(struct cw1200_queue *queue)
{
	cw1200_queue_clear(queue);
	del_timer_sync(&queue->gc);
	INIT_LIST_HEAD(&queue->free_pool);
	kfree(queue->pool);
	kfree(queue->link_map_cache);
	queue->pool = NULL;
	queue->link_map_cache = NULL;
	queue->capacity = 0;
}
size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
				   u32 link_id_map)
{
	size_t ret;
	int i, bit;
	size_t map_capacity = queue->stats->map_capacity;

	if (!link_id_map)
		return 0;

	spin_lock_bh(&queue->lock);
	if (link_id_map == (u32)-1) {
		ret = queue->num_queued - queue->num_pending;
	} else {
		ret = 0;
		for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) {
			if (link_id_map & bit)
				ret += queue->link_map_cache[i];
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}
int cw1200_queue_put(struct cw1200_queue *queue,
		     struct sk_buff *skb,
		     struct cw1200_txpriv *txpriv)
{
	int ret = 0;
	struct cw1200_queue_stats *stats = queue->stats;

	if (txpriv->link_id >= queue->stats->map_capacity)
		return -EINVAL;

	spin_lock_bh(&queue->lock);
	if (!WARN_ON(list_empty(&queue->free_pool))) {
		struct cw1200_queue_item *item = list_first_entry(
			&queue->free_pool, struct cw1200_queue_item, head);
		BUG_ON(item->skb);

		list_move_tail(&item->head, &queue->queue);
		item->skb = skb;
		item->txpriv = *txpriv;
		item->generation = 0;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		item->queue_timestamp = jiffies;

		++queue->num_queued;
		++queue->link_map_cache[txpriv->link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[txpriv->link_id];
		spin_unlock_bh(&stats->lock);

		/* TX may happen in parallel sometimes.
		 * Leave extra queue slots so we don't overflow.
		 */
		if (queue->overfull == false &&
		    queue->num_queued >=
		    (queue->capacity - (num_present_cpus() - 1))) {
			queue->overfull = true;
			__cw1200_queue_lock(queue);
			mod_timer(&queue->gc, jiffies);
		}
	} else {
		ret = -ENOENT;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}
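
/*
 * Watermark arithmetic for the overfull check above: with a capacity
 * of 16 slots on a 4-CPU system the queue goes overfull once
 * num_queued >= 16 - (4 - 1) = 13, i.e. one slot is held back for
 * every extra CPU that may be feeding the queue concurrently.  The
 * GC and remove paths drop overfull again once the queue drains to
 * half capacity (capacity >> 1).
 */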
int cw1200_queue_get(struct cw1200_queue *queue,
		     u32 link_id_map,
		     struct wsm_tx **tx,
		     struct ieee80211_tx_info **tx_info,
		     const struct cw1200_txpriv **txpriv)
{
	int ret = -ENOENT;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	bool wakeup_stats = false;

	spin_lock_bh(&queue->lock);
	list_for_each_entry(item, &queue->queue, head) {
		if (link_id_map & BIT(item->txpriv.link_id)) {
			ret = 0;
			break;
		}
	}

	if (!WARN_ON(ret)) {
		*tx = (struct wsm_tx *)item->skb->data;
		*tx_info = IEEE80211_SKB_CB(item->skb);
		*txpriv = &item->txpriv;
		(*tx)->packet_id = item->packet_id;
		list_move_tail(&item->head, &queue->pending);
		++queue->num_pending;
		--queue->link_map_cache[item->txpriv.link_id];
		item->xmit_timestamp = jiffies;

		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
	}
	spin_unlock_bh(&queue->lock);
	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);
	return ret;
}
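
/*
 * The packet_id stamped into the wsm_tx header here is expected to be
 * echoed back by the firmware in its TX confirmation, at which point
 * cw1200_queue_remove() (frame done) or cw1200_queue_requeue() (try
 * again) maps it straight back to the pool slot.
 */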
int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		item->generation = ++item_generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue_generation,
							    queue_id,
							    item_generation,
							    item_id);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}
int cw1200_queue_requeue_all(struct cw1200_queue *queue)
{
	struct cw1200_queue_item *item, *tmp;
	struct cw1200_queue_stats *stats = queue->stats;
	spin_lock_bh(&queue->lock);

	list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		++item->generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);

	return 0;
}
int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	struct sk_buff *gc_skb = NULL;
	struct cw1200_txpriv gc_txpriv;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		gc_txpriv = item->txpriv;
		gc_skb = item->skb;
		item->skb = NULL;
		--queue->num_pending;
		--queue->num_queued;
		++queue->num_sent;
		++item->generation;
		/* Do not use list_move_tail here, but list_move:
		 * try to utilize cache row.
		 */
		list_move(&item->head, &queue->free_pool);

		if (queue->overfull &&
		    (queue->num_queued <= (queue->capacity >> 1))) {
			queue->overfull = false;
			__cw1200_queue_unlock(queue);
		}
	}
	spin_unlock_bh(&queue->lock);

	if (gc_skb)
		stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv);

	return ret;
}
int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
			 struct sk_buff **skb,
			 const struct cw1200_txpriv **txpriv)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		*skb = item->skb;
		*txpriv = &item->txpriv;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}
void cw1200_queue_lock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_lock(queue);
	spin_unlock_bh(&queue->lock);
}
void cw1200_queue_unlock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_unlock(queue);
	spin_unlock_bh(&queue->lock);
}
bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
				     unsigned long *timestamp,
				     u32 pending_frame_id)
{
	struct cw1200_queue_item *item;
	bool ret;

	spin_lock_bh(&queue->lock);
	ret = !list_empty(&queue->pending);
	if (ret) {
		list_for_each_entry(item, &queue->pending, head) {
			if (item->packet_id != pending_frame_id)
				if (time_before(item->xmit_timestamp,
						*timestamp))
					*timestamp = item->xmit_timestamp;
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}
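
/*
 * The caller seeds *timestamp (presumably with jiffies) and receives
 * back the oldest xmit_timestamp among pending frames, skipping
 * pending_frame_id; the boolean result says whether anything is
 * pending at all.  This is the raw material for a "firmware has sat
 * on a frame too long" watchdog.
 */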
bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
				 u32 link_id_map)
{
	bool empty = true;

	spin_lock_bh(&stats->lock);
	if (link_id_map == (u32)-1) {
		empty = stats->num_queued == 0;
	} else {
		int i;
		for (i = 0; i < stats->map_capacity; ++i) {
			if (link_id_map & BIT(i)) {
				if (stats->link_map_cache[i]) {
					empty = false;
					break;
				}
			}
		}
	}
	spin_unlock_bh(&stats->lock);

	return empty;
}
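
/*
 * A sketch of the intended use of the stats wait queue, which is
 * signalled by the GC, get and clear paths above whenever a per-link
 * count drops to zero (drain_timeout is illustrative; any
 * jiffies-based value works):
 *
 *	ret = wait_event_timeout(stats->wait_link_id_empty,
 *				 cw1200_queue_stats_is_empty(stats,
 *							     link_id_map),
 *				 drain_timeout);
 *	if (!ret)
 *		...	// some links were still busy at the timeout
 */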