iwlagn: Convert kzalloc to kcalloc
[firefly-linux-kernel-4.4.55.git] / drivers/net/wireless/iwlwifi/iwl-trans.c
1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of version 2 of the GNU General Public License as
12  * published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful, but
15  * WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22  * USA
23  *
24  * The full GNU General Public License is included in this distribution
25  * in the file called LICENSE.GPL.
26  *
27  * Contact Information:
28  *  Intel Linux Wireless <ilw@linux.intel.com>
29  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30  *
31  * BSD LICENSE
32  *
33  * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34  * All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  *
40  *  * Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  *  * Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in
44  *    the documentation and/or other materials provided with the
45  *    distribution.
46  *  * Neither the name Intel Corporation nor the names of its
47  *    contributors may be used to endorse or promote products derived
48  *    from this software without specific prior written permission.
49  *
50  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61  *
62  *****************************************************************************/
63 #include <linux/interrupt.h>
64 #include <linux/debugfs.h>
65 #include <linux/bitops.h>
66 #include <linux/gfp.h>
67
68 #include "iwl-trans.h"
69 #include "iwl-trans-int-pcie.h"
70 #include "iwl-csr.h"
71 #include "iwl-prph.h"
72 #include "iwl-shared.h"
73 #include "iwl-eeprom.h"
74 #include "iwl-agn-hw.h"
75
76 static int iwl_trans_rx_alloc(struct iwl_trans *trans)
77 {
78         struct iwl_trans_pcie *trans_pcie =
79                 IWL_TRANS_GET_PCIE_TRANS(trans);
80         struct iwl_rx_queue *rxq = &trans_pcie->rxq;
81         struct device *dev = bus(trans)->dev;
82
83         memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
84
85         spin_lock_init(&rxq->lock);
86         INIT_LIST_HEAD(&rxq->rx_free);
87         INIT_LIST_HEAD(&rxq->rx_used);
88
89         if (WARN_ON(rxq->bd || rxq->rb_stts))
90                 return -EINVAL;
91
92         /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
93         rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
94                                      &rxq->bd_dma, GFP_KERNEL);
95         if (!rxq->bd)
96                 goto err_bd;
97         memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);
98
99         /* Allocate the driver's pointer to receive buffer status */
100         rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
101                                           &rxq->rb_stts_dma, GFP_KERNEL);
102         if (!rxq->rb_stts)
103                 goto err_rb_stts;
104         memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
105
106         return 0;
107
108 err_rb_stts:
109         dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
110                         rxq->bd, rxq->bd_dma);
111         memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
112         rxq->bd = NULL;
113 err_bd:
114         return -ENOMEM;
115 }
116
117 static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
118 {
119         struct iwl_trans_pcie *trans_pcie =
120                 IWL_TRANS_GET_PCIE_TRANS(trans);
121         struct iwl_rx_queue *rxq = &trans_pcie->rxq;
122         int i;
123
124         /* Fill the rx_used queue with _all_ of the Rx buffers */
125         for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
126                 /* In the reset function, these buffers may have been allocated
127                  * to an SKB, so we need to unmap and free potential storage */
128                 if (rxq->pool[i].page != NULL) {
129                         dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
130                                 PAGE_SIZE << hw_params(trans).rx_page_order,
131                                 DMA_FROM_DEVICE);
132                         __free_pages(rxq->pool[i].page,
133                                      hw_params(trans).rx_page_order);
134                         rxq->pool[i].page = NULL;
135                 }
136                 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
137         }
138 }
139
140 static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
141                                  struct iwl_rx_queue *rxq)
142 {
143         u32 rb_size;
144         const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
145         u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
146
147         rb_timeout = RX_RB_TIMEOUT;
148
149         if (iwlagn_mod_params.amsdu_size_8K)
150                 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
151         else
152                 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
153
154         /* Stop Rx DMA */
155         iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
156
157         /* Reset driver's Rx queue write index */
158         iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
159
160         /* Tell device where to find RBD circular buffer in DRAM */
161         iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_BASE_REG,
162                            (u32)(rxq->bd_dma >> 8));
163
164         /* Tell device where in DRAM to update its Rx status */
165         iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_STTS_WPTR_REG,
166                            rxq->rb_stts_dma >> 4);
167
168         /* Enable Rx DMA
169          * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
170          *      the credit mechanism in 5000 HW RX FIFO
171          * Direct rx interrupts to hosts
172          * Rx buffer size 4 or 8k
173          * RB timeout 0x10
174          * 256 RBDs
175          */
176         iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG,
177                            FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
178                            FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
179                            FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
180                            FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
181                            rb_size |
182                            (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
183                            (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
184
185         /* Set interrupt coalescing timer to default (2048 usecs) */
186         iwl_write8(bus(trans), CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
187 }
188
189 static int iwl_rx_init(struct iwl_trans *trans)
190 {
191         struct iwl_trans_pcie *trans_pcie =
192                 IWL_TRANS_GET_PCIE_TRANS(trans);
193         struct iwl_rx_queue *rxq = &trans_pcie->rxq;
194
195         int i, err;
196         unsigned long flags;
197
198         if (!rxq->bd) {
199                 err = iwl_trans_rx_alloc(trans);
200                 if (err)
201                         return err;
202         }
203
204         spin_lock_irqsave(&rxq->lock, flags);
205         INIT_LIST_HEAD(&rxq->rx_free);
206         INIT_LIST_HEAD(&rxq->rx_used);
207
208         iwl_trans_rxq_free_rx_bufs(trans);
209
210         for (i = 0; i < RX_QUEUE_SIZE; i++)
211                 rxq->queue[i] = NULL;
212
213         /* Set us so that we have processed and used all buffers, but have
214          * not restocked the Rx queue with fresh buffers */
215         rxq->read = rxq->write = 0;
216         rxq->write_actual = 0;
217         rxq->free_count = 0;
218         spin_unlock_irqrestore(&rxq->lock, flags);
219
220         iwlagn_rx_replenish(trans);
221
222         iwl_trans_rx_hw_init(trans, rxq);
223
224         spin_lock_irqsave(&trans->shrd->lock, flags);
225         rxq->need_update = 1;
226         iwl_rx_queue_update_write_ptr(trans, rxq);
227         spin_unlock_irqrestore(&trans->shrd->lock, flags);
228
229         return 0;
230 }
231
232 static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
233 {
234         struct iwl_trans_pcie *trans_pcie =
235                 IWL_TRANS_GET_PCIE_TRANS(trans);
236         struct iwl_rx_queue *rxq = &trans_pcie->rxq;
237
238         unsigned long flags;
239
240         /* if rxq->bd is NULL, it means that nothing has been allocated,
241          * exit now */
242         if (!rxq->bd) {
243                 IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
244                 return;
245         }
246
247         spin_lock_irqsave(&rxq->lock, flags);
248         iwl_trans_rxq_free_rx_bufs(trans);
249         spin_unlock_irqrestore(&rxq->lock, flags);
250
251         dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
252                           rxq->bd, rxq->bd_dma);
253         memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
254         rxq->bd = NULL;
255
256         if (rxq->rb_stts)
257                 dma_free_coherent(bus(trans)->dev,
258                                   sizeof(struct iwl_rb_status),
259                                   rxq->rb_stts, rxq->rb_stts_dma);
260         else
261                 IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
262         memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
263         rxq->rb_stts = NULL;
264 }
265
266 static int iwl_trans_rx_stop(struct iwl_trans *trans)
267 {
268
269         /* stop Rx DMA */
270         iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
271         return iwl_poll_direct_bit(bus(trans), FH_MEM_RSSR_RX_STATUS_REG,
272                             FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
273 }
274
275 static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
276                                     struct iwl_dma_ptr *ptr, size_t size)
277 {
278         if (WARN_ON(ptr->addr))
279                 return -EINVAL;
280
281         ptr->addr = dma_alloc_coherent(bus(trans)->dev, size,
282                                        &ptr->dma, GFP_KERNEL);
283         if (!ptr->addr)
284                 return -ENOMEM;
285         ptr->size = size;
286         return 0;
287 }
288
289 static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
290                                     struct iwl_dma_ptr *ptr)
291 {
292         if (unlikely(!ptr->addr))
293                 return;
294
295         dma_free_coherent(bus(trans)->dev, ptr->size, ptr->addr, ptr->dma);
296         memset(ptr, 0, sizeof(*ptr));
297 }
298
299 static int iwl_trans_txq_alloc(struct iwl_trans *trans,
300                                 struct iwl_tx_queue *txq, int slots_num,
301                                 u32 txq_id)
302 {
303         size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
304         int i;
305
306         if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
307                 return -EINVAL;
308
309         txq->q.n_window = slots_num;
310
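        /* kcalloc() zero-initializes both arrays below; on the error path any
         * cmd[i] slot that was never allocated is still NULL, so kfree() on
         * every slot is safe. */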
311         txq->meta = kcalloc(slots_num, sizeof(txq->meta[0]), GFP_KERNEL);
312         txq->cmd = kcalloc(slots_num, sizeof(txq->cmd[0]), GFP_KERNEL);
313
314         if (!txq->meta || !txq->cmd)
315                 goto error;
316
317         if (txq_id == trans->shrd->cmd_queue)
318                 for (i = 0; i < slots_num; i++) {
319                         txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
320                                                 GFP_KERNEL);
321                         if (!txq->cmd[i])
322                                 goto error;
323                 }
324
325         /* Alloc driver data array and TFD circular buffer */
326         /* Driver private data, only for Tx (not command) queues,
327          * not shared with device. */
328         if (txq_id != trans->shrd->cmd_queue) {
329                 txq->skbs = kzalloc(sizeof(txq->skbs[0]) *
330                                    TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
331                 if (!txq->skbs) {
332                         IWL_ERR(trans, "kmalloc for auxiliary BD "
333                                   "structures failed\n");
334                         goto error;
335                 }
336         } else {
337                 txq->skbs = NULL;
338         }
339
340         /* Circular buffer of transmit frame descriptors (TFDs),
341          * shared with device */
342         txq->tfds = dma_alloc_coherent(bus(trans)->dev, tfd_sz,
343                                        &txq->q.dma_addr, GFP_KERNEL);
344         if (!txq->tfds) {
345                 IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
346                 goto error;
347         }
348         txq->q.id = txq_id;
349
350         return 0;
351 error:
352         kfree(txq->skbs);
353         txq->skbs = NULL;
354         /* since txq->cmd has been zeroed,
355          * all non-allocated cmd[i] will be NULL */
356         if (txq->cmd && txq_id == trans->shrd->cmd_queue)
357                 for (i = 0; i < slots_num; i++)
358                         kfree(txq->cmd[i]);
359         kfree(txq->meta);
360         kfree(txq->cmd);
361         txq->meta = NULL;
362         txq->cmd = NULL;
363
364         return -ENOMEM;
365
366 }
367
368 static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
369                       int slots_num, u32 txq_id)
370 {
371         int ret;
372
373         txq->need_update = 0;
374         memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);
375
376         /*
377          * For the default queues 0-3, set up the swq_id
378          * already -- all others need to get one later
379          * (if they need one at all).
380          */
381         if (txq_id < 4)
382                 iwl_set_swq_id(txq, txq_id, txq_id);
383
384         /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
385          * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
386         BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
387
388         /* Initialize queue's high/low-water marks, and head/tail indexes */
389         ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
390                         txq_id);
391         if (ret)
392                 return ret;
393
394         /*
395          * Tell nic where to find circular buffer of Tx Frame Descriptors for
396          * given Tx queue, and enable the DMA channel used for that queue.
397          * Circular buffer (TFD queue in DRAM) physical base address */
398         iwl_write_direct32(bus(trans), FH_MEM_CBBC_QUEUE(txq_id),
399                              txq->q.dma_addr >> 8);
400
401         return 0;
402 }
403
404 /**
405  * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
406  */
407 static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
408 {
409         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
410         struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
411         struct iwl_queue *q = &txq->q;
412         enum dma_data_direction dma_dir;
413
414         if (!q->n_bd)
415                 return;
416
417         /* In the command queue, all the TBs are mapped as BIDI
418          * so unmap them as such.
419          */
420         if (txq_id == trans->shrd->cmd_queue)
421                 dma_dir = DMA_BIDIRECTIONAL;
422         else
423                 dma_dir = DMA_TO_DEVICE;
424
425         while (q->write_ptr != q->read_ptr) {
426                 /* The read_ptr needs to be bounded by q->n_window */
427                 iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
428                                     dma_dir);
429                 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
430         }
431 }
432
433 /**
434  * iwl_tx_queue_free - Deallocate DMA queue.
435  * @txq: Transmit queue to deallocate.
436  *
437  * Empty queue by removing and destroying all BD's.
438  * Free all buffers.
439  * 0-fill, but do not free "txq" descriptor structure.
440  */
441 static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
442 {
443         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
444         struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
445         struct device *dev = bus(trans)->dev;
446         int i;
447         if (WARN_ON(!txq))
448                 return;
449
450         iwl_tx_queue_unmap(trans, txq_id);
451
452         /* De-alloc array of command/tx buffers */
453
454         if (txq_id == trans->shrd->cmd_queue)
455                 for (i = 0; i < txq->q.n_window; i++)
456                         kfree(txq->cmd[i]);
457
458         /* De-alloc circular buffer of TFDs */
459         if (txq->q.n_bd) {
460                 dma_free_coherent(dev, sizeof(struct iwl_tfd) *
461                                   txq->q.n_bd, txq->tfds, txq->q.dma_addr);
462                 memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
463         }
464
465         /* De-alloc array of per-TFD driver data */
466         kfree(txq->skbs);
467         txq->skbs = NULL;
468
469         /* deallocate arrays */
470         kfree(txq->cmd);
471         kfree(txq->meta);
472         txq->cmd = NULL;
473         txq->meta = NULL;
474
475         /* 0-fill queue descriptor structure */
476         memset(txq, 0, sizeof(*txq));
477 }
478
479 /**
480  * iwl_trans_pcie_tx_free - Free TXQ Context
481  *
482  * Destroy all TX DMA queues and structures
483  */
484 static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
485 {
486         int txq_id;
487         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
488
489         /* Tx queues */
490         if (trans_pcie->txq) {
491                 for (txq_id = 0;
492                      txq_id < hw_params(trans).max_txq_num; txq_id++)
493                         iwl_tx_queue_free(trans, txq_id);
494         }
495
496         kfree(trans_pcie->txq);
497         trans_pcie->txq = NULL;
498
499         iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
500
501         iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
502 }
503
504 /**
505  * iwl_trans_tx_alloc - allocate TX context
506  * Allocate all Tx DMA structures and initialize them
507  *
508  * @trans: the transport context
509  * Returns 0 on success, or a negative error code on failure
510  */
511 static int iwl_trans_tx_alloc(struct iwl_trans *trans)
512 {
513         int ret;
514         int txq_id, slots_num;
515         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
516
517         u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
518                         sizeof(struct iwlagn_scd_bc_tbl);
519
520         /* It is not allowed to alloc twice, so warn when this happens.
521          * We cannot rely on the previous allocation, so free and fail */
522         if (WARN_ON(trans_pcie->txq)) {
523                 ret = -EINVAL;
524                 goto error;
525         }
526
527         ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
528                                    scd_bc_tbls_size);
529         if (ret) {
530                 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
531                 goto error;
532         }
533
534         /* Alloc keep-warm buffer */
535         ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
536         if (ret) {
537                 IWL_ERR(trans, "Keep Warm allocation failed\n");
538                 goto error;
539         }
540
541         trans_pcie->txq = kzalloc(sizeof(struct iwl_tx_queue) *
542                         hw_params(trans).max_txq_num, GFP_KERNEL);
543         if (!trans_pcie->txq) {
544                 IWL_ERR(trans, "Not enough memory for txq\n");
545                 ret = -ENOMEM;
546                 goto error;
547         }
548
549         /* Alloc and init all Tx queues, including the command queue (#4/#9) */
550         for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
551                 slots_num = (txq_id == trans->shrd->cmd_queue) ?
552                                         TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
553                 ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
554                                           slots_num, txq_id);
555                 if (ret) {
556                         IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
557                         goto error;
558                 }
559         }
560
561         return 0;
562
563 error:
564         iwl_trans_pcie_tx_free(trans);
565
566         return ret;
567 }
568 static int iwl_tx_init(struct iwl_trans *trans)
569 {
570         int ret;
571         int txq_id, slots_num;
572         unsigned long flags;
573         bool alloc = false;
574         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
575
576         if (!trans_pcie->txq) {
577                 ret = iwl_trans_tx_alloc(trans);
578                 if (ret)
579                         goto error;
580                 alloc = true;
581         }
582
583         spin_lock_irqsave(&trans->shrd->lock, flags);
584
585         /* Turn off all Tx DMA fifos */
586         iwl_write_prph(bus(trans), SCD_TXFACT, 0);
587
588         /* Tell NIC where to find the "keep warm" buffer */
589         iwl_write_direct32(bus(trans), FH_KW_MEM_ADDR_REG,
590                            trans_pcie->kw.dma >> 4);
591
592         spin_unlock_irqrestore(&trans->shrd->lock, flags);
593
594         /* Alloc and init all Tx queues, including the command queue (#4/#9) */
595         for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
596                 slots_num = (txq_id == trans->shrd->cmd_queue) ?
597                                         TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
598                 ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
599                                          slots_num, txq_id);
600                 if (ret) {
601                         IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
602                         goto error;
603                 }
604         }
605
606         return 0;
607 error:
608         /* Upon error, free only if we allocated something */
609         if (alloc)
610                 iwl_trans_pcie_tx_free(trans);
611         return ret;
612 }
613
614 static void iwl_set_pwr_vmain(struct iwl_trans *trans)
615 {
616 /*
617  * (for documentation purposes)
618  * to set power to V_AUX, do:
619
620                 if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
621                         iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
622                                                APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
623                                                ~APMG_PS_CTRL_MSK_PWR_SRC);
624  */
625
626         iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
627                                APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
628                                ~APMG_PS_CTRL_MSK_PWR_SRC);
629 }
630
631 static int iwl_nic_init(struct iwl_trans *trans)
632 {
633         unsigned long flags;
634
635         /* nic_init */
636         spin_lock_irqsave(&trans->shrd->lock, flags);
637         iwl_apm_init(priv(trans));
638
639         /* Set interrupt coalescing calibration timer to default (512 usecs) */
640         iwl_write8(bus(trans), CSR_INT_COALESCING,
641                 IWL_HOST_INT_CALIB_TIMEOUT_DEF);
642
643         spin_unlock_irqrestore(&trans->shrd->lock, flags);
644
645         iwl_set_pwr_vmain(trans);
646
647         iwl_nic_config(priv(trans));
648
649         /* Allocate the RX queue, or reset if it is already allocated */
650         iwl_rx_init(trans);
651
652         /* Allocate or reset and init all Tx and Command queues */
653         if (iwl_tx_init(trans))
654                 return -ENOMEM;
655
656         if (hw_params(trans).shadow_reg_enable) {
657                 /* enable shadow regs in HW */
658                 iwl_set_bit(bus(trans), CSR_MAC_SHADOW_REG_CTRL,
659                         0x800FFFFF);
660         }
661
662         set_bit(STATUS_INIT, &trans->shrd->status);
663
664         return 0;
665 }
666
667 #define HW_READY_TIMEOUT (50)
668
669 /* Note: returns poll_bit return value, which is >= 0 if success */
670 static int iwl_set_hw_ready(struct iwl_trans *trans)
671 {
672         int ret;
673
674         iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
675                 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
676
677         /* See if we got it */
678         ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
679                                 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
680                                 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
681                                 HW_READY_TIMEOUT);
682
683         IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
684         return ret;
685 }
686
687 /* Note: returns standard 0/-ERROR code */
688 static int iwl_trans_pcie_prepare_card_hw(struct iwl_trans *trans)
689 {
690         int ret;
691
692         IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
693
694         ret = iwl_set_hw_ready(trans);
695         if (ret >= 0)
696                 return 0;
697
698         /* If HW is not ready, prepare the conditions to check again */
699         iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
700                         CSR_HW_IF_CONFIG_REG_PREPARE);
701
702         ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
703                         ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
704                         CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
705
706         if (ret < 0)
707                 return ret;
708
709         /* HW should be ready by now, check again. */
710         ret = iwl_set_hw_ready(trans);
711         if (ret >= 0)
712                 return 0;
713         return ret;
714 }
715
716 #define IWL_AC_UNSET -1
717
718 struct queue_to_fifo_ac {
719         s8 fifo, ac;
720 };
721
722 static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
723         { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
724         { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
725         { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
726         { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
727         { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
728         { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
729         { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
730         { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
731         { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
732         { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
733         { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
734 };
735
736 static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
737         { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
738         { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
739         { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
740         { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
741         { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
742         { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
743         { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
744         { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
745         { IWL_TX_FIFO_BE_IPAN, 2, },
746         { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
747         { IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
748 };
749
750 static const u8 iwlagn_bss_ac_to_fifo[] = {
751         IWL_TX_FIFO_VO,
752         IWL_TX_FIFO_VI,
753         IWL_TX_FIFO_BE,
754         IWL_TX_FIFO_BK,
755 };
756 static const u8 iwlagn_bss_ac_to_queue[] = {
757         0, 1, 2, 3,
758 };
759 static const u8 iwlagn_pan_ac_to_fifo[] = {
760         IWL_TX_FIFO_VO_IPAN,
761         IWL_TX_FIFO_VI_IPAN,
762         IWL_TX_FIFO_BE_IPAN,
763         IWL_TX_FIFO_BK_IPAN,
764 };
765 static const u8 iwlagn_pan_ac_to_queue[] = {
766         7, 6, 5, 4,
767 };
768
769 static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
770 {
771         int ret;
772         struct iwl_trans_pcie *trans_pcie =
773                 IWL_TRANS_GET_PCIE_TRANS(trans);
774
775         trans->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;
776         trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
777         trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;
778
779         trans_pcie->ac_to_fifo[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_fifo;
780         trans_pcie->ac_to_fifo[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_fifo;
781
782         trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
783         trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;
784
785         if ((hw_params(trans).sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
786              iwl_trans_pcie_prepare_card_hw(trans)) {
787                 IWL_WARN(trans, "Exit HW not ready\n");
788                 return -EIO;
789         }
790
791         /* If platform's RF_KILL switch is NOT set to KILL */
792         if (iwl_read32(bus(trans), CSR_GP_CNTRL) &
793                         CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
794                 clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
795         else
796                 set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
797
798         if (iwl_is_rfkill(trans->shrd)) {
799                 iwl_set_hw_rfkill_state(priv(trans), true);
800                 iwl_enable_interrupts(trans);
801                 return -ERFKILL;
802         }
803
804         iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
805
806         ret = iwl_nic_init(trans);
807         if (ret) {
808                 IWL_ERR(trans, "Unable to init nic\n");
809                 return ret;
810         }
811
812         /* make sure rfkill handshake bits are cleared */
813         iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
814         iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR,
815                     CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
816
817         /* clear (again), then enable host interrupts */
818         iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
819         iwl_enable_interrupts(trans);
820
821         /* really make sure rfkill handshake bits are cleared */
822         iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
823         iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
824
825         return 0;
826 }
827
828 /*
829  * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask;
830  * must be called under priv->shrd->lock and with MAC access held
831  */
832 static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
833 {
834         iwl_write_prph(bus(trans), SCD_TXFACT, mask);
835 }
836
837 static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
838 {
839         const struct queue_to_fifo_ac *queue_to_fifo;
840         struct iwl_trans_pcie *trans_pcie =
841                 IWL_TRANS_GET_PCIE_TRANS(trans);
842         u32 a;
843         unsigned long flags;
844         int i, chan;
845         u32 reg_val;
846
847         spin_lock_irqsave(&trans->shrd->lock, flags);
848
849         trans_pcie->scd_base_addr =
850                 iwl_read_prph(bus(trans), SCD_SRAM_BASE_ADDR);
851         a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
852         /* reset context data memory */
853         for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
854                 a += 4)
855                 iwl_write_targ_mem(bus(trans), a, 0);
856         /* reset tx status memory */
857         for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
858                 a += 4)
859                 iwl_write_targ_mem(bus(trans), a, 0);
860         for (; a < trans_pcie->scd_base_addr +
861                SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(trans).max_txq_num);
862                a += 4)
863                 iwl_write_targ_mem(bus(trans), a, 0);
864
865         iwl_write_prph(bus(trans), SCD_DRAM_BASE_ADDR,
866                        trans_pcie->scd_bc_tbls.dma >> 10);
867
868         /* Enable DMA channel */
869         for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
870                 iwl_write_direct32(bus(trans), FH_TCSR_CHNL_TX_CONFIG_REG(chan),
871                                 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
872                                 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
873
874         /* Update FH chicken bits */
875         reg_val = iwl_read_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG);
876         iwl_write_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG,
877                            reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
878
879         iwl_write_prph(bus(trans), SCD_QUEUECHAIN_SEL,
880                 SCD_QUEUECHAIN_SEL_ALL(trans));
881         iwl_write_prph(bus(trans), SCD_AGGR_SEL, 0);
882
883         /* initiate the queues */
884         for (i = 0; i < hw_params(trans).max_txq_num; i++) {
885                 iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(i), 0);
886                 iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR, 0 | (i << 8));
887                 iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
888                                 SCD_CONTEXT_QUEUE_OFFSET(i), 0);
889                 iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
890                                 SCD_CONTEXT_QUEUE_OFFSET(i) +
891                                 sizeof(u32),
892                                 ((SCD_WIN_SIZE <<
893                                 SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
894                                 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
895                                 ((SCD_FRAME_LIMIT <<
896                                 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
897                                 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
898         }
899
900         iwl_write_prph(bus(trans), SCD_INTERRUPT_MASK,
901                         IWL_MASK(0, hw_params(trans).max_txq_num));
902
903         /* Activate all Tx DMA/FIFO channels */
904         iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
905
906         /* map queues to FIFOs */
907         if (trans->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS))
908                 queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
909         else
910                 queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
911
912         iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);
913
914         /* make sure all queues are not stopped */
915         memset(&trans_pcie->queue_stopped[0], 0,
916                 sizeof(trans_pcie->queue_stopped));
917         for (i = 0; i < 4; i++)
918                 atomic_set(&trans_pcie->queue_stop_count[i], 0);
919
920         /* reset to 0 to enable all the queues first */
921         trans_pcie->txq_ctx_active_msk = 0;
922
923         BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
924                                                 IWLAGN_FIRST_AMPDU_QUEUE);
925         BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
926                                                 IWLAGN_FIRST_AMPDU_QUEUE);
927
928         for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
929                 int fifo = queue_to_fifo[i].fifo;
930                 int ac = queue_to_fifo[i].ac;
931
932                 iwl_txq_ctx_activate(trans_pcie, i);
933
934                 if (fifo == IWL_TX_FIFO_UNUSED)
935                         continue;
936
937                 if (ac != IWL_AC_UNSET)
938                         iwl_set_swq_id(&trans_pcie->txq[i], ac, i);
939                 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
940                                               fifo, 0);
941         }
942
943         spin_unlock_irqrestore(&trans->shrd->lock, flags);
944
945         /* Enable L1-Active */
946         iwl_clear_bits_prph(bus(trans), APMG_PCIDEV_STT_REG,
947                           APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
948 }
949
950 /**
951  * iwl_trans_tx_stop - Stop all Tx DMA channels
952  */
953 static int iwl_trans_tx_stop(struct iwl_trans *trans)
954 {
955         int ch, txq_id;
956         unsigned long flags;
957         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
958
959         /* Turn off all Tx DMA fifos */
960         spin_lock_irqsave(&trans->shrd->lock, flags);
961
962         iwl_trans_txq_set_sched(trans, 0);
963
964         /* Stop each Tx DMA channel, and wait for it to be idle */
965         for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
966                 iwl_write_direct32(bus(trans),
967                                    FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
968                 if (iwl_poll_direct_bit(bus(trans), FH_TSSR_TX_STATUS_REG,
969                                     FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
970                                     1000))
971                         IWL_ERR(trans, "Failing on timeout while stopping"
972                             " DMA channel %d [0x%08x]", ch,
973                             iwl_read_direct32(bus(trans),
974                                               FH_TSSR_TX_STATUS_REG));
975         }
976         spin_unlock_irqrestore(&trans->shrd->lock, flags);
977
978         if (!trans_pcie->txq) {
979                 IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
980                 return 0;
981         }
982
983         /* Unmap DMA from host system and free skb's */
984         for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
985                 iwl_tx_queue_unmap(trans, txq_id);
986
987         return 0;
988 }
989
990 static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans)
991 {
992         unsigned long flags;
993         struct iwl_trans_pcie *trans_pcie =
994                 IWL_TRANS_GET_PCIE_TRANS(trans);
995
996         spin_lock_irqsave(&trans->shrd->lock, flags);
997         iwl_disable_interrupts(trans);
998         spin_unlock_irqrestore(&trans->shrd->lock, flags);
999
1000         /* wait to make sure we flush the pending tasklet */
1001         synchronize_irq(bus(trans)->irq);
1002         tasklet_kill(&trans_pcie->irq_tasklet);
1003 }
1004
1005 static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1006 {
1007         /* stop and reset the on-board processor */
1008         iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
1009
1010         /* tell the device to stop sending interrupts */
1011         iwl_trans_pcie_disable_sync_irq(trans);
1012
1013         /* device going down, Stop using ICT table */
1014         iwl_disable_ict(trans);
1015
1016         /*
1017          * If a HW restart happens during firmware loading,
1018          * then the firmware loading might call this function
1019          * and later it might be called again due to the
1020          * restart. So don't process again if the device is
1021          * already dead.
1022          */
1023         if (test_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status)) {
1024                 iwl_trans_tx_stop(trans);
1025                 iwl_trans_rx_stop(trans);
1026
1027                 /* Power-down device's busmaster DMA clocks */
1028                 iwl_write_prph(bus(trans), APMG_CLK_DIS_REG,
1029                                APMG_CLK_VAL_DMA_CLK_RQT);
1030                 udelay(5);
1031         }
1032
1033         /* Make sure (redundant) we've released our request to stay awake */
1034         iwl_clear_bit(bus(trans), CSR_GP_CNTRL,
1035                         CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1036
1037         /* Stop the device, and put it in low power state */
1038         iwl_apm_stop(priv(trans));
1039 }
1040
1041 static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1042                 struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
1043                 u8 sta_id)
1044 {
1045         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1046         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1047         struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1048         struct iwl_tx_cmd *tx_cmd = &dev_cmd->cmd.tx;
1049         struct iwl_cmd_meta *out_meta;
1050         struct iwl_tx_queue *txq;
1051         struct iwl_queue *q;
1052
1053         dma_addr_t phys_addr = 0;
1054         dma_addr_t txcmd_phys;
1055         dma_addr_t scratch_phys;
1056         u16 len, firstlen, secondlen;
1057         u16 seq_number = 0;
1058         u8 wait_write_ptr = 0;
1059         u8 txq_id;
1060         u8 tid = 0;
1061         bool is_agg = false;
1062         __le16 fc = hdr->frame_control;
1063         u8 hdr_len = ieee80211_hdrlen(fc);
1064
1065         /*
1066          * Send this frame after DTIM -- there's a special queue
1067          * reserved for this for contexts that support AP mode.
1068          */
1069         if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1070                 txq_id = trans_pcie->mcast_queue[ctx];
1071
1072                 /*
1073                  * The microcode will clear the more data
1074                  * bit in the last frame it transmits.
1075                  */
1076                 hdr->frame_control |=
1077                         cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1078         } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
1079                 txq_id = IWL_AUX_QUEUE;
1080         else
1081                 txq_id =
1082                     trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];
1083
1084         if (ieee80211_is_data_qos(fc)) {
1085                 u8 *qc = NULL;
1086                 struct iwl_tid_data *tid_data;
1087                 qc = ieee80211_get_qos_ctl(hdr);
1088                 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
1089                 tid_data = &trans->shrd->tid_data[sta_id][tid];
1090
1091                 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
1092                         return -1;
1093
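                /* The 802.11 sequence number lives in bits 4-15 of seq_ctrl
                 * (IEEE80211_SCTL_SEQ), so the driver keeps its counter in
                 * those units: seq_number += 0x10 advances it by one sequence
                 * number, while the header's fragment bits
                 * (IEEE80211_SCTL_FRAG) are preserved. */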
1094                 seq_number = tid_data->seq_number;
1095                 seq_number &= IEEE80211_SCTL_SEQ;
1096                 hdr->seq_ctrl = hdr->seq_ctrl &
1097                                 cpu_to_le16(IEEE80211_SCTL_FRAG);
1098                 hdr->seq_ctrl |= cpu_to_le16(seq_number);
1099                 seq_number += 0x10;
1100                 /* aggregation is on for this <sta,tid> */
1101                 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1102                     tid_data->agg.state == IWL_AGG_ON) {
1103                         txq_id = tid_data->agg.txq_id;
1104                         is_agg = true;
1105                 }
1106         }
1107
1108         txq = &trans_pcie->txq[txq_id];
1109         q = &txq->q;
1110
1111         /* Set up driver data for this TFD */
1112         txq->skbs[q->write_ptr] = skb;
1113         txq->cmd[q->write_ptr] = dev_cmd;
1114
1115         dev_cmd->hdr.cmd = REPLY_TX;
1116         dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1117                                 INDEX_TO_SEQ(q->write_ptr)));
1118
1119         /* Set up first empty entry in queue's array of Tx/cmd buffers */
1120         out_meta = &txq->meta[q->write_ptr];
1121
1122         /*
1123          * Use the first empty entry in this queue's command buffer array
1124          * to contain the Tx command and MAC header concatenated together
1125          * (payload data will be in another buffer).
1126          * Size of this varies, due to varying MAC header length.
1127          * If end is not dword aligned, we'll have 2 extra bytes at the end
1128          * of the MAC header (device reads on dword boundaries).
1129          * We'll tell device about this padding later.
1130          */
1131         len = sizeof(struct iwl_tx_cmd) +
1132                 sizeof(struct iwl_cmd_header) + hdr_len;
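        /* (len + 3) & ~3 rounds the Tx command + MAC header length up to the
         * next dword (4-byte) boundary, since the device reads on dword
         * boundaries. */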
1133         firstlen = (len + 3) & ~3;
1134
1135         /* Tell NIC about any 2-byte padding after MAC header */
1136         if (firstlen != len)
1137                 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1138
1139         /* Physical address of this Tx command's header (not MAC header!),
1140          * within command buffer array. */
1141         txcmd_phys = dma_map_single(bus(trans)->dev,
1142                                     &dev_cmd->hdr, firstlen,
1143                                     DMA_BIDIRECTIONAL);
1144         if (unlikely(dma_mapping_error(bus(trans)->dev, txcmd_phys)))
1145                 return -1;
1146         dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1147         dma_unmap_len_set(out_meta, len, firstlen);
1148
1149         if (!ieee80211_has_morefrags(fc)) {
1150                 txq->need_update = 1;
1151         } else {
1152                 wait_write_ptr = 1;
1153                 txq->need_update = 0;
1154         }
1155
1156         /* Set up TFD's 2nd entry to point directly to remainder of skb,
1157          * if any (802.11 null frames have no payload). */
1158         secondlen = skb->len - hdr_len;
1159         if (secondlen > 0) {
1160                 phys_addr = dma_map_single(bus(trans)->dev, skb->data + hdr_len,
1161                                            secondlen, DMA_TO_DEVICE);
1162                 if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
1163                         dma_unmap_single(bus(trans)->dev,
1164                                          dma_unmap_addr(out_meta, mapping),
1165                                          dma_unmap_len(out_meta, len),
1166                                          DMA_BIDIRECTIONAL);
1167                         return -1;
1168                 }
1169         }
1170
1171         /* Attach buffers to TFD */
1172         iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
1173         if (secondlen > 0)
1174                 iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
1175                                              secondlen, 0);
1176
1177         scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
1178                                 offsetof(struct iwl_tx_cmd, scratch);
1179
1180         /* take back ownership of DMA buffer to enable update */
1181         dma_sync_single_for_cpu(bus(trans)->dev, txcmd_phys, firstlen,
1182                         DMA_BIDIRECTIONAL);
1183         tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1184         tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1185
1186         IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
1187                      le16_to_cpu(dev_cmd->hdr.sequence));
1188         IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1189         iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
1190         iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
1191
1192         /* Set up entry for this TFD in Tx byte-count array */
1193         if (is_agg)
1194                 iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
1195                                                le16_to_cpu(tx_cmd->len));
1196
1197         dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
1198                         DMA_BIDIRECTIONAL);
1199
1200         trace_iwlwifi_dev_tx(priv(trans),
1201                              &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
1202                              sizeof(struct iwl_tfd),
1203                              &dev_cmd->hdr, firstlen,
1204                              skb->data + hdr_len, secondlen);
1205
1206         /* Tell device the write index *just past* this latest filled TFD */
1207         q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1208         iwl_txq_update_write_ptr(trans, txq);
1209
1210         if (ieee80211_is_data_qos(fc)) {
1211                 trans->shrd->tid_data[sta_id][tid].tfds_in_queue++;
1212                 if (!ieee80211_has_morefrags(fc))
1213                         trans->shrd->tid_data[sta_id][tid].seq_number =
1214                                 seq_number;
1215         }
1216
1217         /*
1218          * At this point the frame is "transmitted" successfully
1219          * and we will get a TX status notification eventually,
1220          * regardless of the value of ret. "ret" only indicates
1221          * whether or not we should update the write pointer.
1222          */
1223         if (iwl_queue_space(q) < q->high_mark) {
1224                 if (wait_write_ptr) {
1225                         txq->need_update = 1;
1226                         iwl_txq_update_write_ptr(trans, txq);
1227                 } else {
1228                         iwl_stop_queue(trans, txq);
1229                 }
1230         }
1231         return 0;
1232 }
1233
1234 static void iwl_trans_pcie_kick_nic(struct iwl_trans *trans)
1235 {
1236         /* Remove all resets to allow NIC to operate */
1237         iwl_write32(bus(trans), CSR_RESET, 0);
1238 }
1239
1240 static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
1241 {
1242         struct iwl_trans_pcie *trans_pcie =
1243                 IWL_TRANS_GET_PCIE_TRANS(trans);
1244         int err;
1245
1246         trans_pcie->inta_mask = CSR_INI_SET_MASK;
1247
1248         tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
1249                 iwl_irq_tasklet, (unsigned long)trans);
1250
1251         iwl_alloc_isr_ict(trans);
1252
1253         err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
1254                 DRV_NAME, trans);
1255         if (err) {
1256                 IWL_ERR(trans, "Error allocating IRQ %d\n", bus(trans)->irq);
1257                 iwl_free_isr_ict(trans);
1258                 return err;
1259         }
1260
1261         INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
1262         return 0;
1263 }
1264
1265 static int iwlagn_txq_check_empty(struct iwl_trans *trans,
1266                            int sta_id, u8 tid, int txq_id)
1267 {
1268         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1269         struct iwl_queue *q = &trans_pcie->txq[txq_id].q;
1270         struct iwl_tid_data *tid_data = &trans->shrd->tid_data[sta_id][tid];
1271
1272         lockdep_assert_held(&trans->shrd->sta_lock);
1273
1274         switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
1275         case IWL_EMPTYING_HW_QUEUE_DELBA:
1276                 /* We are reclaiming the last packet of the */
1277                 /* aggregated HW queue */
1278                 if ((txq_id  == tid_data->agg.txq_id) &&
1279                     (q->read_ptr == q->write_ptr)) {
1280                         IWL_DEBUG_HT(trans,
1281                                 "HW queue empty: continue DELBA flow\n");
1282                         iwl_trans_pcie_txq_agg_disable(trans, txq_id);
1283                         tid_data->agg.state = IWL_AGG_OFF;
1284                         iwl_stop_tx_ba_trans_ready(priv(trans),
1285                                                    NUM_IWL_RXON_CTX,
1286                                                    sta_id, tid);
1287                         iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
1288                 }
1289                 break;
1290         case IWL_EMPTYING_HW_QUEUE_ADDBA:
1291                 /* We are reclaiming the last packet of the queue */
1292                 if (tid_data->tfds_in_queue == 0) {
1293                         IWL_DEBUG_HT(trans,
1294                                 "HW queue empty: continue ADDBA flow\n");
1295                         tid_data->agg.state = IWL_AGG_ON;
1296                         iwl_start_tx_ba_trans_ready(priv(trans),
1297                                                     NUM_IWL_RXON_CTX,
1298                                                     sta_id, tid);
1299                 }
1300                 break;
1301         default:
1302                 break;
1303         }
1304
1305         return 0;
1306 }
1307
1308 static void iwl_free_tfds_in_queue(struct iwl_trans *trans,
1309                             int sta_id, int tid, int freed)
1310 {
1311         lockdep_assert_held(&trans->shrd->sta_lock);
1312
1313         if (trans->shrd->tid_data[sta_id][tid].tfds_in_queue >= freed)
1314                 trans->shrd->tid_data[sta_id][tid].tfds_in_queue -= freed;
1315         else {
1316                 IWL_DEBUG_TX(trans, "free more than tfds_in_queue (%u:%d)\n",
1317                         trans->shrd->tid_data[sta_id][tid].tfds_in_queue,
1318                         freed);
1319                 trans->shrd->tid_data[sta_id][tid].tfds_in_queue = 0;
1320         }
1321 }
1322
1323 static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
1324                       int txq_id, int ssn, u32 status,
1325                       struct sk_buff_head *skbs)
1326 {
1327         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1328         struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
1329         enum iwl_agg_state agg_state;
1330         /* n_bd is usually 256 => n_bd - 1 = 0xff */
1331         int tfd_num = ssn & (txq->q.n_bd - 1);
1332         int freed = 0;
1333         bool cond;
1334
1335         txq->time_stamp = jiffies;
1336
1337         if (txq->sched_retry) {
1338                 agg_state =
1339                         trans->shrd->tid_data[txq->sta_id][txq->tid].agg.state;
1340                 cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA);
1341         } else {
1342                 cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX);
1343         }
1344
1345         if (txq->q.read_ptr != tfd_num) {
1346                 IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim "
1347                                 "scd_ssn=%d idx=%d txq=%d swq=%d\n",
1348                                 ssn, tfd_num, txq_id, txq->swq_id);
1349                 freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
1350                 if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
1351                         iwl_wake_queue(trans, txq);
1352         }
1353
1354         iwl_free_tfds_in_queue(trans, sta_id, tid, freed);
1355         iwlagn_txq_check_empty(trans, sta_id, tid, txq_id);
1356 }
1357
1358 static void iwl_trans_pcie_free(struct iwl_trans *trans)
1359 {
1360         iwl_trans_pcie_tx_free(trans);
1361         iwl_trans_pcie_rx_free(trans);
1362         free_irq(bus(trans)->irq, trans);
1363         iwl_free_isr_ict(trans);
1364         trans->shrd->trans = NULL;
1365         kfree(trans);
1366 }
1367
1368 #ifdef CONFIG_PM
1369
1370 static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
1371 {
1372          * This function is called when the system goes into suspend state.
1373          * mac80211 will call iwl_mac_stop() from the mac80211 suspend function
1374          * first, but since iwl_mac_stop() has no knowledge of who the caller is,
1375          * it will not call apm_ops.stop() to stop the DMA operation.
1376          * We call apm_ops.stop() here to make sure the DMA is stopped.
1377          * Calling apm_ops.stop here to make sure we stop the DMA.
1378          *
1379          * But of course ... if we have configured WoWLAN then we did other
1380          * things already :-)
1381          */
1382         if (!trans->shrd->wowlan)
1383                 iwl_apm_stop(priv(trans));
1384
1385         return 0;
1386 }
1387
1388 static int iwl_trans_pcie_resume(struct iwl_trans *trans)
1389 {
1390         bool hw_rfkill = false;
1391
1392         iwl_enable_interrupts(trans);
1393
1394         if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
1395                                 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
1396                 hw_rfkill = true;
1397
1398         if (hw_rfkill)
1399                 set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
1400         else
1401                 clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
1402
1403         iwl_set_hw_rfkill_state(priv(trans), hw_rfkill);
1404
1405         return 0;
1406 }
1407 #else /* CONFIG_PM */
1408 static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
1409 { return 0; }
1410
1411 static int iwl_trans_pcie_resume(struct iwl_trans *trans)
1412 { return 0; }
1413
1414 #endif /* CONFIG_PM */
1415
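/*
 * Wake the HW queue backing each AC of the given RXON context, logging
 * whether the corresponding AC is currently stopped or awake.
 */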
1416 static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
1417                                           enum iwl_rxon_context_id ctx)
1418 {
1419         u8 ac, txq_id;
1420         struct iwl_trans_pcie *trans_pcie =
1421                 IWL_TRANS_GET_PCIE_TRANS(trans);
1422
1423         for (ac = 0; ac < AC_NUM; ac++) {
1424                 txq_id = trans_pcie->ac_to_queue[ctx][ac];
1425                 IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n",
1426                         ac,
1427                         (atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
1428                               ? "stopped" : "awake");
1429                 iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
1430         }
1431 }
1432
1433 const struct iwl_trans_ops trans_ops_pcie;
1434
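/*
 * Allocate the transport and its PCIe-specific private data in a single
 * allocation; IWL_TRANS_GET_PCIE_TRANS() resolves to the private area that
 * follows struct iwl_trans. Returns NULL if the allocation fails.
 */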
1435 static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
1436 {
1437         struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) +
1438                                               sizeof(struct iwl_trans_pcie),
1439                                               GFP_KERNEL);
1440         if (iwl_trans) {
1441                 struct iwl_trans_pcie *trans_pcie =
1442                         IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
1443                 iwl_trans->ops = &trans_ops_pcie;
1444                 iwl_trans->shrd = shrd;
1445                 trans_pcie->trans = iwl_trans;
1446                 spin_lock_init(&iwl_trans->hcmd_lock);
1447         }
1448
1449         return iwl_trans;
1450 }
1451
1452 static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id)
1453 {
1454         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1455
1456         iwl_stop_queue(trans, &trans_pcie->txq[txq_id]);
1457 }
1458
1459 #define IWL_FLUSH_WAIT_MS       2000
1460
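/*
 * Wait for every Tx queue except the command queue to drain (read pointer
 * catching up with the write pointer), polling for up to IWL_FLUSH_WAIT_MS
 * per queue. Returns 0 on success or -ETIMEDOUT if any queue fails to empty.
 */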
1461 static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
1462 {
1463         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1464         struct iwl_tx_queue *txq;
1465         struct iwl_queue *q;
1466         int cnt;
1467         unsigned long now = jiffies;
1468         int ret = 0;
1469
1470         /* waiting for all the tx frames to complete might take a while */
1471         for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
1472                 if (cnt == trans->shrd->cmd_queue)
1473                         continue;
1474                 txq = &trans_pcie->txq[cnt];
1475                 q = &txq->q;
1476                 while (q->read_ptr != q->write_ptr && !time_after(jiffies,
1477                        now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
1478                         msleep(1);
1479
1480                 if (q->read_ptr != q->write_ptr) {
1481                         IWL_ERR(trans, "failed to flush all tx fifo queues\n");
1482                         ret = -ETIMEDOUT;
1483                         break;
1484                 }
1485         }
1486         return ret;
1487 }
1488
1489 /*
1490  * On every watchdog tick we check the (latest) time stamp. If it has not
1491  * changed within the timeout and the queue is not empty, reset the firmware.
1492  */
1493 static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
1494 {
1495         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1496         struct iwl_tx_queue *txq = &trans_pcie->txq[cnt];
1497         struct iwl_queue *q = &txq->q;
1498         unsigned long timeout;
1499
1500         if (q->read_ptr == q->write_ptr) {
1501                 txq->time_stamp = jiffies;
1502                 return 0;
1503         }
1504
1505         timeout = txq->time_stamp +
1506                   msecs_to_jiffies(hw_params(trans).wd_timeout);
1507
1508         if (time_after(jiffies, timeout)) {
1509                 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
1510                         hw_params(trans).wd_timeout);
1511                 IWL_ERR(trans, "Current read_ptr %d write_ptr %d\n",
1512                         q->read_ptr, q->write_ptr);
1513                 return 1;
1514         }
1515
1516         return 0;
1517 }
1518
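/*
 * Map an FH register offset to its symbolic name for the register dumps
 * below; returns "UNKNOWN" for anything not in the table.
 */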
1519 static const char *get_fh_string(int cmd)
1520 {
1521         switch (cmd) {
1522         IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
1523         IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
1524         IWL_CMD(FH_RSCSR_CHNL0_WPTR);
1525         IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
1526         IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
1527         IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
1528         IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
1529         IWL_CMD(FH_TSSR_TX_STATUS_REG);
1530         IWL_CMD(FH_TSSR_TX_ERROR_REG);
1531         default:
1532                 return "UNKNOWN";
1533         }
1534 }
1535
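/*
 * Dump the flow handler (FH) registers. When CONFIG_IWLWIFI_DEBUG is set
 * and 'display' is true, the dump is formatted into a freshly allocated
 * buffer returned through *buf (the caller must kfree() it) and the number
 * of bytes written is returned; otherwise the values are printed with
 * IWL_ERR and 0 is returned.
 */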
1536 int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
1537 {
1538         int i;
1539 #ifdef CONFIG_IWLWIFI_DEBUG
1540         int pos = 0;
1541         size_t bufsz = 0;
1542 #endif
1543         static const u32 fh_tbl[] = {
1544                 FH_RSCSR_CHNL0_STTS_WPTR_REG,
1545                 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
1546                 FH_RSCSR_CHNL0_WPTR,
1547                 FH_MEM_RCSR_CHNL0_CONFIG_REG,
1548                 FH_MEM_RSSR_SHARED_CTRL_REG,
1549                 FH_MEM_RSSR_RX_STATUS_REG,
1550                 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
1551                 FH_TSSR_TX_STATUS_REG,
1552                 FH_TSSR_TX_ERROR_REG
1553         };
1554 #ifdef CONFIG_IWLWIFI_DEBUG
1555         if (display) {
1556                 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
1557                 *buf = kmalloc(bufsz, GFP_KERNEL);
1558                 if (!*buf)
1559                         return -ENOMEM;
1560                 pos += scnprintf(*buf + pos, bufsz - pos,
1561                                 "FH register values:\n");
1562                 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
1563                         pos += scnprintf(*buf + pos, bufsz - pos,
1564                                 "  %34s: 0x%08x\n",
1565                                 get_fh_string(fh_tbl[i]),
1566                                 iwl_read_direct32(bus(trans), fh_tbl[i]));
1567                 }
1568                 return pos;
1569         }
1570 #endif
1571         IWL_ERR(trans, "FH register values:\n");
1572         for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
1573                 IWL_ERR(trans, "  %34s: 0x%08x\n",
1574                         get_fh_string(fh_tbl[i]),
1575                         iwl_read_direct32(bus(trans), fh_tbl[i]));
1576         }
1577         return 0;
1578 }
1579
1580 static const char *get_csr_string(int cmd)
1581 {
1582         switch (cmd) {
1583         IWL_CMD(CSR_HW_IF_CONFIG_REG);
1584         IWL_CMD(CSR_INT_COALESCING);
1585         IWL_CMD(CSR_INT);
1586         IWL_CMD(CSR_INT_MASK);
1587         IWL_CMD(CSR_FH_INT_STATUS);
1588         IWL_CMD(CSR_GPIO_IN);
1589         IWL_CMD(CSR_RESET);
1590         IWL_CMD(CSR_GP_CNTRL);
1591         IWL_CMD(CSR_HW_REV);
1592         IWL_CMD(CSR_EEPROM_REG);
1593         IWL_CMD(CSR_EEPROM_GP);
1594         IWL_CMD(CSR_OTP_GP_REG);
1595         IWL_CMD(CSR_GIO_REG);
1596         IWL_CMD(CSR_GP_UCODE_REG);
1597         IWL_CMD(CSR_GP_DRIVER_REG);
1598         IWL_CMD(CSR_UCODE_DRV_GP1);
1599         IWL_CMD(CSR_UCODE_DRV_GP2);
1600         IWL_CMD(CSR_LED_REG);
1601         IWL_CMD(CSR_DRAM_INT_TBL_REG);
1602         IWL_CMD(CSR_GIO_CHICKEN_BITS);
1603         IWL_CMD(CSR_ANA_PLL_CFG);
1604         IWL_CMD(CSR_HW_REV_WA_REG);
1605         IWL_CMD(CSR_DBG_HPET_MEM_REG);
1606         default:
1607                 return "UNKNOWN";
1608         }
1609 }
1610
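/* Print the current value of the most interesting CSR registers. */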
1611 void iwl_dump_csr(struct iwl_trans *trans)
1612 {
1613         int i;
1614         static const u32 csr_tbl[] = {
1615                 CSR_HW_IF_CONFIG_REG,
1616                 CSR_INT_COALESCING,
1617                 CSR_INT,
1618                 CSR_INT_MASK,
1619                 CSR_FH_INT_STATUS,
1620                 CSR_GPIO_IN,
1621                 CSR_RESET,
1622                 CSR_GP_CNTRL,
1623                 CSR_HW_REV,
1624                 CSR_EEPROM_REG,
1625                 CSR_EEPROM_GP,
1626                 CSR_OTP_GP_REG,
1627                 CSR_GIO_REG,
1628                 CSR_GP_UCODE_REG,
1629                 CSR_GP_DRIVER_REG,
1630                 CSR_UCODE_DRV_GP1,
1631                 CSR_UCODE_DRV_GP2,
1632                 CSR_LED_REG,
1633                 CSR_DRAM_INT_TBL_REG,
1634                 CSR_GIO_CHICKEN_BITS,
1635                 CSR_ANA_PLL_CFG,
1636                 CSR_HW_REV_WA_REG,
1637                 CSR_DBG_HPET_MEM_REG
1638         };
1639         IWL_ERR(trans, "CSR values:\n");
1640         IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
1641                 "CSR_INT_PERIODIC_REG)\n");
1642         for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
1643                 IWL_ERR(trans, "  %25s: 0x%08x\n",
1644                         get_csr_string(csr_tbl[i]),
1645                         iwl_read32(bus(trans), csr_tbl[i]));
1646         }
1647 }
1648
1649 #ifdef CONFIG_IWLWIFI_DEBUGFS
1650 /* creation and removal of debugfs files */
1651 #define DEBUGFS_ADD_FILE(name, parent, mode) do {                       \
1652         if (!debugfs_create_file(#name, mode, parent, trans,            \
1653                                  &iwl_dbgfs_##name##_ops))              \
1654                 return -ENOMEM;                                         \
1655 } while (0)
1656
1657 /* file operations */
1658 #define DEBUGFS_READ_FUNC(name)                                         \
1659 static ssize_t iwl_dbgfs_##name##_read(struct file *file,               \
1660                                         char __user *user_buf,          \
1661                                         size_t count, loff_t *ppos);
1662
1663 #define DEBUGFS_WRITE_FUNC(name)                                        \
1664 static ssize_t iwl_dbgfs_##name##_write(struct file *file,              \
1665                                         const char __user *user_buf,    \
1666                                         size_t count, loff_t *ppos);
1667
1668
1669 static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
1670 {
1671         file->private_data = inode->i_private;
1672         return 0;
1673 }
1674
1675 #define DEBUGFS_READ_FILE_OPS(name)                                     \
1676         DEBUGFS_READ_FUNC(name);                                        \
1677 static const struct file_operations iwl_dbgfs_##name##_ops = {          \
1678         .read = iwl_dbgfs_##name##_read,                                \
1679         .open = iwl_dbgfs_open_file_generic,                            \
1680         .llseek = generic_file_llseek,                                  \
1681 };
1682
1683 #define DEBUGFS_WRITE_FILE_OPS(name)                                    \
1684         DEBUGFS_WRITE_FUNC(name);                                       \
1685 static const struct file_operations iwl_dbgfs_##name##_ops = {          \
1686         .write = iwl_dbgfs_##name##_write,                              \
1687         .open = iwl_dbgfs_open_file_generic,                            \
1688         .llseek = generic_file_llseek,                                  \
1689 };
1690
1691 #define DEBUGFS_READ_WRITE_FILE_OPS(name)                               \
1692         DEBUGFS_READ_FUNC(name);                                        \
1693         DEBUGFS_WRITE_FUNC(name);                                       \
1694 static const struct file_operations iwl_dbgfs_##name##_ops = {          \
1695         .write = iwl_dbgfs_##name##_write,                              \
1696         .read = iwl_dbgfs_##name##_read,                                \
1697         .open = iwl_dbgfs_open_file_generic,                            \
1698         .llseek = generic_file_llseek,                                  \
1699 };
1700
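/*
 * debugfs read handler: report read/write pointers, stop state and swq_id
 * of every Tx queue; for the four AC queues the stop count is shown too.
 */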
1701 static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1702                                                 char __user *user_buf,
1703                                                 size_t count, loff_t *ppos)
1704 {
1705         struct iwl_trans *trans = file->private_data;
1706         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1707         struct iwl_tx_queue *txq;
1708         struct iwl_queue *q;
1709         char *buf;
1710         int pos = 0;
1711         int cnt;
1712         int ret;
1713         const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num;
1714
1715         if (!trans_pcie->txq) {
1716                 IWL_ERR(trans, "txq not ready\n");
1717                 return -EAGAIN;
1718         }
1719         buf = kzalloc(bufsz, GFP_KERNEL);
1720         if (!buf)
1721                 return -ENOMEM;
1722
1723         for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
1724                 txq = &trans_pcie->txq[cnt];
1725                 q = &txq->q;
1726                 pos += scnprintf(buf + pos, bufsz - pos,
1727                                 "hwq %.2d: read=%u write=%u stop=%d"
1728                                 " swq_id=%#.2x (ac %d/hwq %d)\n",
1729                                 cnt, q->read_ptr, q->write_ptr,
1730                                 !!test_bit(cnt, trans_pcie->queue_stopped),
1731                                 txq->swq_id, txq->swq_id & 3,
1732                                 (txq->swq_id >> 2) & 0x1f);
1733                 if (cnt >= 4)
1734                         continue;
1735                 /* for the ACs, display the stop count too */
1736                 pos += scnprintf(buf + pos, bufsz - pos,
1737                         "        stop-count: %d\n",
1738                         atomic_read(&trans_pcie->queue_stop_count[cnt]));
1739         }
1740         ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1741         kfree(buf);
1742         return ret;
1743 }
1744
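/*
 * debugfs read handler: report the Rx queue read/write indices, the number
 * of free RBs and the last closed RB index reported by the device.
 */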
1745 static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1746                                                 char __user *user_buf,
1747                                                 size_t count, loff_t *ppos) {
1748         struct iwl_trans *trans = file->private_data;
1749         struct iwl_trans_pcie *trans_pcie =
1750                 IWL_TRANS_GET_PCIE_TRANS(trans);
1751         struct iwl_rx_queue *rxq = &trans_pcie->rxq;
1752         char buf[256];
1753         int pos = 0;
1754         const size_t bufsz = sizeof(buf);
1755
1756         pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
1757                                                 rxq->read);
1758         pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
1759                                                 rxq->write);
1760         pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
1761                                                 rxq->free_count);
1762         if (rxq->rb_stts) {
1763                 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
1764                          le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
1765         } else {
1766                 pos += scnprintf(buf + pos, bufsz - pos,
1767                                         "closed_rb_num: Not Allocated\n");
1768         }
1769         return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1770 }
1771
1772 static ssize_t iwl_dbgfs_log_event_read(struct file *file,
1773                                          char __user *user_buf,
1774                                          size_t count, loff_t *ppos)
1775 {
1776         struct iwl_trans *trans = file->private_data;
1777         char *buf;
1778         int pos = 0;
1779         ssize_t ret = -ENOMEM;
1780
1781         ret = pos = iwl_dump_nic_event_log(trans, true, &buf, true);
1782         if (buf) {
1783                 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1784                 kfree(buf);
1785         }
1786         return ret;
1787 }
1788
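/*
 * debugfs write handler: writing 1 (e.g. "echo 1 > log_event") dumps the
 * device event log to the kernel log.
 */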
1789 static ssize_t iwl_dbgfs_log_event_write(struct file *file,
1790                                         const char __user *user_buf,
1791                                         size_t count, loff_t *ppos)
1792 {
1793         struct iwl_trans *trans = file->private_data;
1794         u32 event_log_flag;
1795         char buf[8];
1796         int buf_size;
1797
1798         memset(buf, 0, sizeof(buf));
1799         buf_size = min(count, sizeof(buf) - 1);
1800         if (copy_from_user(buf, user_buf, buf_size))
1801                 return -EFAULT;
1802         if (sscanf(buf, "%u", &event_log_flag) != 1)
1803                 return -EFAULT;
1804         if (event_log_flag == 1)
1805                 iwl_dump_nic_event_log(trans, true, NULL, false);
1806
1807         return count;
1808 }
1809
1810 static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
1811                                         char __user *user_buf,
1812                                         size_t count, loff_t *ppos)
1813 {
1814         struct iwl_trans *trans = file->private_data;
1815         struct iwl_trans_pcie *trans_pcie =
1816                 IWL_TRANS_GET_PCIE_TRANS(trans);
1817         struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1818
1819         int pos = 0;
1820         char *buf;
1821         int bufsz = 24 * 64; /* 24 items * 64 char per item */
1822         ssize_t ret;
1823
1824         buf = kzalloc(bufsz, GFP_KERNEL);
1825         if (!buf) {
1826                 IWL_ERR(trans, "Cannot allocate buffer\n");
1827                 return -ENOMEM;
1828         }
1829
1830         pos += scnprintf(buf + pos, bufsz - pos,
1831                         "Interrupt Statistics Report:\n");
1832
1833         pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
1834                 isr_stats->hw);
1835         pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
1836                 isr_stats->sw);
1837         if (isr_stats->sw || isr_stats->hw) {
1838                 pos += scnprintf(buf + pos, bufsz - pos,
1839                         "\tLast Restarting Code:  0x%X\n",
1840                         isr_stats->err_code);
1841         }
1842 #ifdef CONFIG_IWLWIFI_DEBUG
1843         pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
1844                 isr_stats->sch);
1845         pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
1846                 isr_stats->alive);
1847 #endif
1848         pos += scnprintf(buf + pos, bufsz - pos,
1849                 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
1850
1851         pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
1852                 isr_stats->ctkill);
1853
1854         pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
1855                 isr_stats->wakeup);
1856
1857         pos += scnprintf(buf + pos, bufsz - pos,
1858                 "Rx command responses:\t\t %u\n", isr_stats->rx);
1859
1860         pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
1861                 isr_stats->tx);
1862
1863         pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
1864                 isr_stats->unhandled);
1865
1866         ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1867         kfree(buf);
1868         return ret;
1869 }
1870
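/*
 * debugfs write handler: writing 0 (e.g. "echo 0 > interrupt") clears the
 * accumulated interrupt statistics reported by the read handler above.
 */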
1871 static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
1872                                          const char __user *user_buf,
1873                                          size_t count, loff_t *ppos)
1874 {
1875         struct iwl_trans *trans = file->private_data;
1876         struct iwl_trans_pcie *trans_pcie =
1877                 IWL_TRANS_GET_PCIE_TRANS(trans);
1878         struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1879
1880         char buf[8];
1881         int buf_size;
1882         u32 reset_flag;
1883
1884         memset(buf, 0, sizeof(buf));
1885         buf_size = min(count, sizeof(buf) - 1);
1886         if (copy_from_user(buf, user_buf, buf_size))
1887                 return -EFAULT;
1888         if (sscanf(buf, "%x", &reset_flag) != 1)
1889                 return -EFAULT;
1890         if (reset_flag == 0)
1891                 memset(isr_stats, 0, sizeof(*isr_stats));
1892
1893         return count;
1894 }
1895
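/*
 * debugfs write handler: the value written is parsed but not otherwise
 * used; any valid write simply triggers a CSR dump via iwl_dump_csr().
 */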
1896 static ssize_t iwl_dbgfs_csr_write(struct file *file,
1897                                          const char __user *user_buf,
1898                                          size_t count, loff_t *ppos)
1899 {
1900         struct iwl_trans *trans = file->private_data;
1901         char buf[8];
1902         int buf_size;
1903         int csr;
1904
1905         memset(buf, 0, sizeof(buf));
1906         buf_size = min(count, sizeof(buf) - 1);
1907         if (copy_from_user(buf, user_buf, buf_size))
1908                 return -EFAULT;
1909         if (sscanf(buf, "%d", &csr) != 1)
1910                 return -EFAULT;
1911
1912         iwl_dump_csr(trans);
1913
1914         return count;
1915 }
1916
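/*
 * debugfs read handler: dump the FH registers into a buffer via
 * iwl_dump_fh() and copy it to userspace.
 */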
1917 static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
1918                                          char __user *user_buf,
1919                                          size_t count, loff_t *ppos)
1920 {
1921         struct iwl_trans *trans = file->private_data;
1922         char *buf;
1923         int pos = 0;
1924         ssize_t ret = -EFAULT;
1925
1926         ret = pos = iwl_dump_fh(trans, &buf, true);
1927         if (buf) {
1928                 ret = simple_read_from_buffer(user_buf,
1929                                               count, ppos, buf, pos);
1930                 kfree(buf);
1931         }
1932
1933         return ret;
1934 }
1935
1936 DEBUGFS_READ_WRITE_FILE_OPS(log_event);
1937 DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
1938 DEBUGFS_READ_FILE_OPS(fh_reg);
1939 DEBUGFS_READ_FILE_OPS(rx_queue);
1940 DEBUGFS_READ_FILE_OPS(tx_queue);
1941 DEBUGFS_WRITE_FILE_OPS(csr);
1942
1943 /*
1944  * Create the debugfs files and directories under the directory
1945  * provided by the caller.
1946  */
1947 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
1948                                         struct dentry *dir)
1949 {
1950         DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
1951         DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
1952         DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
1953         DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
1954         DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
1955         DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
1956         return 0;
1957 }
1958 #else
1959 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
1960                                         struct dentry *dir)
1961 { return 0; }
1962
1963 #endif /* CONFIG_IWLWIFI_DEBUGFS */
1964
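/*
 * The PCIe implementation of the transport API. The upper layers only
 * reach the transport through this ops table.
 */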
1965 const struct iwl_trans_ops trans_ops_pcie = {
1966         .alloc = iwl_trans_pcie_alloc,
1967         .request_irq = iwl_trans_pcie_request_irq,
1968         .start_device = iwl_trans_pcie_start_device,
1969         .prepare_card_hw = iwl_trans_pcie_prepare_card_hw,
1970         .stop_device = iwl_trans_pcie_stop_device,
1971
1972         .tx_start = iwl_trans_pcie_tx_start,
1973         .wake_any_queue = iwl_trans_pcie_wake_any_queue,
1974
1975         .send_cmd = iwl_trans_pcie_send_cmd,
1976         .send_cmd_pdu = iwl_trans_pcie_send_cmd_pdu,
1977
1978         .tx = iwl_trans_pcie_tx,
1979         .reclaim = iwl_trans_pcie_reclaim,
1980
1981         .tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
1982         .tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,
1983         .tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
1984
1985         .kick_nic = iwl_trans_pcie_kick_nic,
1986
1987         .free = iwl_trans_pcie_free,
1988         .stop_queue = iwl_trans_pcie_stop_queue,
1989
1990         .dbgfs_register = iwl_trans_pcie_dbgfs_register,
1991
1992         .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,
1993         .check_stuck_queue = iwl_trans_pcie_check_stuck_queue,
1994
1995         .suspend = iwl_trans_pcie_suspend,
1996         .resume = iwl_trans_pcie_resume,
1997 };
1998