92c01ad9de158ee8b352d0296f1f44fb10682404
[firefly-linux-kernel-4.4.55.git] / drivers / net / ethernet / intel / i40e / i40e_adminq.c
1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program; if not, write to the Free Software Foundation, Inc.,
17  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  * The full GNU General Public License is included in this distribution in
20  * the file called "COPYING".
21  *
22  * Contact Information:
23  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25  *
26  ******************************************************************************/
27
28 #include "i40e_status.h"
29 #include "i40e_type.h"
30 #include "i40e_register.h"
31 #include "i40e_adminq.h"
32 #include "i40e_prototype.h"
33
34 /**
35  *  i40e_adminq_init_regs - Initialize AdminQ registers
36  *  @hw: pointer to the hardware structure
37  *
38  *  This assumes the alloc_asq and alloc_arq functions have already been called
39  **/
40 static void i40e_adminq_init_regs(struct i40e_hw *hw)
41 {
42         /* set head and tail registers in our local struct */
43         if (hw->mac.type == I40E_MAC_VF) {
44                 hw->aq.asq.tail = I40E_VF_ATQT1;
45                 hw->aq.asq.head = I40E_VF_ATQH1;
46                 hw->aq.asq.len  = I40E_VF_ATQLEN1;
47                 hw->aq.arq.tail = I40E_VF_ARQT1;
48                 hw->aq.arq.head = I40E_VF_ARQH1;
49                 hw->aq.arq.len  = I40E_VF_ARQLEN1;
50         } else {
51                 hw->aq.asq.tail = I40E_PF_ATQT;
52                 hw->aq.asq.head = I40E_PF_ATQH;
53                 hw->aq.asq.len  = I40E_PF_ATQLEN;
54                 hw->aq.arq.tail = I40E_PF_ARQT;
55                 hw->aq.arq.head = I40E_PF_ARQH;
56                 hw->aq.arq.len  = I40E_PF_ARQLEN;
57         }
58 }
59
60 /**
61  *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
62  *  @hw: pointer to the hardware structure
63  **/
64 static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
65 {
66         i40e_status ret_code;
67         struct i40e_virt_mem mem;
68
69         ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
70                                          i40e_mem_atq_ring,
71                                          (hw->aq.num_asq_entries *
72                                          sizeof(struct i40e_aq_desc)),
73                                          I40E_ADMINQ_DESC_ALIGNMENT);
74         if (ret_code)
75                 return ret_code;
76
77         hw->aq.asq.desc = hw->aq.asq_mem.va;
78         hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;
79
80         ret_code = i40e_allocate_virt_mem(hw, &mem,
81                                           (hw->aq.num_asq_entries *
82                                           sizeof(struct i40e_asq_cmd_details)));
83         if (ret_code) {
84                 i40e_free_dma_mem(hw, &hw->aq.asq_mem);
85                 hw->aq.asq_mem.va = NULL;
86                 hw->aq.asq_mem.pa = 0;
87                 return ret_code;
88         }
89
90         hw->aq.asq.details = mem.va;
91
92         return ret_code;
93 }
94
95 /**
96  *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
97  *  @hw: pointer to the hardware structure
98  **/
99 static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
100 {
101         i40e_status ret_code;
102
103         ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
104                                          i40e_mem_arq_ring,
105                                          (hw->aq.num_arq_entries *
106                                          sizeof(struct i40e_aq_desc)),
107                                          I40E_ADMINQ_DESC_ALIGNMENT);
108         if (ret_code)
109                 return ret_code;
110
111         hw->aq.arq.desc = hw->aq.arq_mem.va;
112         hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;
113
114         return ret_code;
115 }
116
117 /**
118  *  i40e_free_adminq_asq - Free Admin Queue send rings
119  *  @hw: pointer to the hardware structure
120  *
121  *  This assumes the posted send buffers have already been cleaned
122  *  and de-allocated
123  **/
124 static void i40e_free_adminq_asq(struct i40e_hw *hw)
125 {
126         struct i40e_virt_mem mem;
127
128         i40e_free_dma_mem(hw, &hw->aq.asq_mem);
129         hw->aq.asq_mem.va = NULL;
130         hw->aq.asq_mem.pa = 0;
131         mem.va = hw->aq.asq.details;
132         i40e_free_virt_mem(hw, &mem);
133         hw->aq.asq.details = NULL;
134 }
135
136 /**
137  *  i40e_free_adminq_arq - Free Admin Queue receive rings
138  *  @hw: pointer to the hardware structure
139  *
140  *  This assumes the posted receive buffers have already been cleaned
141  *  and de-allocated
142  **/
143 static void i40e_free_adminq_arq(struct i40e_hw *hw)
144 {
145         i40e_free_dma_mem(hw, &hw->aq.arq_mem);
146         hw->aq.arq_mem.va = NULL;
147         hw->aq.arq_mem.pa = 0;
148 }
149
150 /**
151  *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
152  *  @hw:     pointer to the hardware structure
153  **/
154 static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
155 {
156         i40e_status ret_code;
157         struct i40e_aq_desc *desc;
158         struct i40e_virt_mem mem;
159         struct i40e_dma_mem *bi;
160         int i;
161
162         /* We'll be allocating the buffer info memory first, then we can
163          * allocate the mapped buffers for the event processing
164          */
165
166         /* buffer_info structures do not need alignment */
167         ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
168                                           sizeof(struct i40e_dma_mem)));
169         if (ret_code)
170                 goto alloc_arq_bufs;
171         hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;
172
173         /* allocate the mapped buffers */
174         for (i = 0; i < hw->aq.num_arq_entries; i++) {
175                 bi = &hw->aq.arq.r.arq_bi[i];
176                 ret_code = i40e_allocate_dma_mem(hw, bi,
177                                                  i40e_mem_arq_buf,
178                                                  hw->aq.arq_buf_size,
179                                                  I40E_ADMINQ_DESC_ALIGNMENT);
180                 if (ret_code)
181                         goto unwind_alloc_arq_bufs;
182
183                 /* now configure the descriptors for use */
184                 desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
185
186                 desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
187                 if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
188                         desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
189                 desc->opcode = 0;
190                 /* This is in accordance with Admin queue design, there is no
191                  * register for buffer size configuration
192                  */
193                 desc->datalen = cpu_to_le16((u16)bi->size);
194                 desc->retval = 0;
195                 desc->cookie_high = 0;
196                 desc->cookie_low = 0;
197                 desc->params.external.addr_high =
198                         cpu_to_le32(upper_32_bits(bi->pa));
199                 desc->params.external.addr_low =
200                         cpu_to_le32(lower_32_bits(bi->pa));
201                 desc->params.external.param0 = 0;
202                 desc->params.external.param1 = 0;
203         }
204
205 alloc_arq_bufs:
206         return ret_code;
207
208 unwind_alloc_arq_bufs:
209         /* don't try to free the one that failed... */
210         i--;
211         for (; i >= 0; i--)
212                 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
213         mem.va = hw->aq.arq.r.arq_bi;
214         i40e_free_virt_mem(hw, &mem);
215
216         return ret_code;
217 }
218
219 /**
220  *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
221  *  @hw:     pointer to the hardware structure
222  **/
223 static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
224 {
225         i40e_status ret_code;
226         struct i40e_virt_mem mem;
227         struct i40e_dma_mem *bi;
228         int i;
229
230         /* No mapped memory needed yet, just the buffer info structures */
231         ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
232                                           sizeof(struct i40e_dma_mem)));
233         if (ret_code)
234                 goto alloc_asq_bufs;
235         hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;
236
237         /* allocate the mapped buffers */
238         for (i = 0; i < hw->aq.num_asq_entries; i++) {
239                 bi = &hw->aq.asq.r.asq_bi[i];
240                 ret_code = i40e_allocate_dma_mem(hw, bi,
241                                                  i40e_mem_asq_buf,
242                                                  hw->aq.asq_buf_size,
243                                                  I40E_ADMINQ_DESC_ALIGNMENT);
244                 if (ret_code)
245                         goto unwind_alloc_asq_bufs;
246         }
247 alloc_asq_bufs:
248         return ret_code;
249
250 unwind_alloc_asq_bufs:
251         /* don't try to free the one that failed... */
252         i--;
253         for (; i >= 0; i--)
254                 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
255         mem.va = hw->aq.asq.r.asq_bi;
256         i40e_free_virt_mem(hw, &mem);
257
258         return ret_code;
259 }
260
261 /**
262  *  i40e_free_arq_bufs - Free receive queue buffer info elements
263  *  @hw:     pointer to the hardware structure
264  **/
265 static void i40e_free_arq_bufs(struct i40e_hw *hw)
266 {
267         struct i40e_virt_mem mem;
268         int i;
269
270         for (i = 0; i < hw->aq.num_arq_entries; i++)
271                 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
272
273         mem.va = hw->aq.arq.r.arq_bi;
274         i40e_free_virt_mem(hw, &mem);
275 }
276
277 /**
278  *  i40e_free_asq_bufs - Free send queue buffer info elements
279  *  @hw:     pointer to the hardware structure
280  **/
281 static void i40e_free_asq_bufs(struct i40e_hw *hw)
282 {
283         struct i40e_virt_mem mem;
284         int i;
285
286         /* only unmap if the address is non-NULL */
287         for (i = 0; i < hw->aq.num_asq_entries; i++)
288                 if (hw->aq.asq.r.asq_bi[i].pa)
289                         i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
290
291         /* now free the buffer info list */
292         mem.va = hw->aq.asq.r.asq_bi;
293         i40e_free_virt_mem(hw, &mem);
294 }
295
296 /**
297  *  i40e_config_asq_regs - configure ASQ registers
298  *  @hw:     pointer to the hardware structure
299  *
300  *  Configure base address and length registers for the transmit queue
301  **/
302 static void i40e_config_asq_regs(struct i40e_hw *hw)
303 {
304         if (hw->mac.type == I40E_MAC_VF) {
305                 /* configure the transmit queue */
306                 wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
307                 wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
308                 wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
309                                           I40E_VF_ATQLEN1_ATQENABLE_MASK));
310         } else {
311                 /* configure the transmit queue */
312                 wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
313                 wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
314                 wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
315                                           I40E_PF_ATQLEN_ATQENABLE_MASK));
316         }
317 }
318
319 /**
320  *  i40e_config_arq_regs - ARQ register configuration
321  *  @hw:     pointer to the hardware structure
322  *
323  * Configure base address and length registers for the receive (event queue)
324  **/
325 static void i40e_config_arq_regs(struct i40e_hw *hw)
326 {
327         if (hw->mac.type == I40E_MAC_VF) {
328                 /* configure the receive queue */
329                 wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
330                 wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
331                 wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
332                                           I40E_VF_ARQLEN1_ARQENABLE_MASK));
333         } else {
334                 /* configure the receive queue */
335                 wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
336                 wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
337                 wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
338                                           I40E_PF_ARQLEN_ARQENABLE_MASK));
339         }
340
341         /* Update tail in the HW to post pre-allocated buffers */
342         wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
343 }
344
345 /**
346  *  i40e_init_asq - main initialization routine for ASQ
347  *  @hw:     pointer to the hardware structure
348  *
349  *  This is the main initialization routine for the Admin Send Queue
350  *  Prior to calling this function, drivers *MUST* set the following fields
351  *  in the hw->aq structure:
352  *     - hw->aq.num_asq_entries
353  *     - hw->aq.arq_buf_size
354  *
355  *  Do *NOT* hold the lock when calling this as the memory allocation routines
356  *  called are not going to be atomic context safe
357  **/
358 static i40e_status i40e_init_asq(struct i40e_hw *hw)
359 {
360         i40e_status ret_code = 0;
361
362         if (hw->aq.asq.count > 0) {
363                 /* queue already initialized */
364                 ret_code = I40E_ERR_NOT_READY;
365                 goto init_adminq_exit;
366         }
367
368         /* verify input for valid configuration */
369         if ((hw->aq.num_asq_entries == 0) ||
370             (hw->aq.asq_buf_size == 0)) {
371                 ret_code = I40E_ERR_CONFIG;
372                 goto init_adminq_exit;
373         }
374
375         hw->aq.asq.next_to_use = 0;
376         hw->aq.asq.next_to_clean = 0;
377         hw->aq.asq.count = hw->aq.num_asq_entries;
378
379         /* allocate the ring memory */
380         ret_code = i40e_alloc_adminq_asq_ring(hw);
381         if (ret_code)
382                 goto init_adminq_exit;
383
384         /* allocate buffers in the rings */
385         ret_code = i40e_alloc_asq_bufs(hw);
386         if (ret_code)
387                 goto init_adminq_free_rings;
388
389         /* initialize base registers */
390         i40e_config_asq_regs(hw);
391
392         /* success! */
393         goto init_adminq_exit;
394
395 init_adminq_free_rings:
396         i40e_free_adminq_asq(hw);
397
398 init_adminq_exit:
399         return ret_code;
400 }
401
402 /**
403  *  i40e_init_arq - initialize ARQ
404  *  @hw:     pointer to the hardware structure
405  *
406  *  The main initialization routine for the Admin Receive (Event) Queue.
407  *  Prior to calling this function, drivers *MUST* set the following fields
408  *  in the hw->aq structure:
409  *     - hw->aq.num_asq_entries
410  *     - hw->aq.arq_buf_size
411  *
412  *  Do *NOT* hold the lock when calling this as the memory allocation routines
413  *  called are not going to be atomic context safe
414  **/
415 static i40e_status i40e_init_arq(struct i40e_hw *hw)
416 {
417         i40e_status ret_code = 0;
418
419         if (hw->aq.arq.count > 0) {
420                 /* queue already initialized */
421                 ret_code = I40E_ERR_NOT_READY;
422                 goto init_adminq_exit;
423         }
424
425         /* verify input for valid configuration */
426         if ((hw->aq.num_arq_entries == 0) ||
427             (hw->aq.arq_buf_size == 0)) {
428                 ret_code = I40E_ERR_CONFIG;
429                 goto init_adminq_exit;
430         }
431
432         hw->aq.arq.next_to_use = 0;
433         hw->aq.arq.next_to_clean = 0;
434         hw->aq.arq.count = hw->aq.num_arq_entries;
435
436         /* allocate the ring memory */
437         ret_code = i40e_alloc_adminq_arq_ring(hw);
438         if (ret_code)
439                 goto init_adminq_exit;
440
441         /* allocate buffers in the rings */
442         ret_code = i40e_alloc_arq_bufs(hw);
443         if (ret_code)
444                 goto init_adminq_free_rings;
445
446         /* initialize base registers */
447         i40e_config_arq_regs(hw);
448
449         /* success! */
450         goto init_adminq_exit;
451
452 init_adminq_free_rings:
453         i40e_free_adminq_arq(hw);
454
455 init_adminq_exit:
456         return ret_code;
457 }
458
459 /**
460  *  i40e_shutdown_asq - shutdown the ASQ
461  *  @hw:     pointer to the hardware structure
462  *
463  *  The main shutdown routine for the Admin Send Queue
464  **/
465 static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
466 {
467         i40e_status ret_code = 0;
468
469         if (hw->aq.asq.count == 0)
470                 return I40E_ERR_NOT_READY;
471
472         /* Stop firmware AdminQ processing */
473         wr32(hw, hw->aq.asq.head, 0);
474         wr32(hw, hw->aq.asq.tail, 0);
475         wr32(hw, hw->aq.asq.len, 0);
476
477         /* make sure lock is available */
478         mutex_lock(&hw->aq.asq_mutex);
479
480         hw->aq.asq.count = 0; /* to indicate uninitialized queue */
481
482         /* free ring buffers */
483         i40e_free_asq_bufs(hw);
484         /* free the ring descriptors */
485         i40e_free_adminq_asq(hw);
486
487         mutex_unlock(&hw->aq.asq_mutex);
488
489         return ret_code;
490 }
491
492 /**
493  *  i40e_shutdown_arq - shutdown ARQ
494  *  @hw:     pointer to the hardware structure
495  *
496  *  The main shutdown routine for the Admin Receive Queue
497  **/
498 static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
499 {
500         i40e_status ret_code = 0;
501
502         if (hw->aq.arq.count == 0)
503                 return I40E_ERR_NOT_READY;
504
505         /* Stop firmware AdminQ processing */
506         wr32(hw, hw->aq.arq.head, 0);
507         wr32(hw, hw->aq.arq.tail, 0);
508         wr32(hw, hw->aq.arq.len, 0);
509
510         /* make sure lock is available */
511         mutex_lock(&hw->aq.arq_mutex);
512
513         hw->aq.arq.count = 0; /* to indicate uninitialized queue */
514
515         /* free ring buffers */
516         i40e_free_arq_bufs(hw);
517         /* free the ring descriptors */
518         i40e_free_adminq_arq(hw);
519
520         mutex_unlock(&hw->aq.arq_mutex);
521
522         return ret_code;
523 }
524
525 /**
526  *  i40e_init_adminq - main initialization routine for Admin Queue
527  *  @hw:     pointer to the hardware structure
528  *
529  *  Prior to calling this function, drivers *MUST* set the following fields
530  *  in the hw->aq structure:
531  *     - hw->aq.num_asq_entries
532  *     - hw->aq.num_arq_entries
533  *     - hw->aq.arq_buf_size
534  *     - hw->aq.asq_buf_size
535  **/
536 i40e_status i40e_init_adminq(struct i40e_hw *hw)
537 {
538         u16 eetrack_lo, eetrack_hi;
539         i40e_status ret_code;
540
541         /* verify input for valid configuration */
542         if ((hw->aq.num_arq_entries == 0) ||
543             (hw->aq.num_asq_entries == 0) ||
544             (hw->aq.arq_buf_size == 0) ||
545             (hw->aq.asq_buf_size == 0)) {
546                 ret_code = I40E_ERR_CONFIG;
547                 goto init_adminq_exit;
548         }
549
550         /* initialize locks */
551         mutex_init(&hw->aq.asq_mutex);
552         mutex_init(&hw->aq.arq_mutex);
553
554         /* Set up register offsets */
555         i40e_adminq_init_regs(hw);
556
557         /* allocate the ASQ */
558         ret_code = i40e_init_asq(hw);
559         if (ret_code)
560                 goto init_adminq_destroy_locks;
561
562         /* allocate the ARQ */
563         ret_code = i40e_init_arq(hw);
564         if (ret_code)
565                 goto init_adminq_free_asq;
566
567         ret_code = i40e_aq_get_firmware_version(hw,
568                                      &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver,
569                                      &hw->aq.api_maj_ver, &hw->aq.api_min_ver,
570                                      NULL);
571         if (ret_code)
572                 goto init_adminq_free_arq;
573
574         if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
575             hw->aq.api_min_ver != I40E_FW_API_VERSION_MINOR) {
576                 ret_code = I40E_ERR_FIRMWARE_API_VERSION;
577                 goto init_adminq_free_arq;
578         }
579         i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
580         i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
581         i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
582         hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
583
584         ret_code = i40e_aq_set_hmc_resource_profile(hw,
585                                                     I40E_HMC_PROFILE_DEFAULT,
586                                                     0,
587                                                     NULL);
588         ret_code = 0;
589
590         /* success! */
591         goto init_adminq_exit;
592
593 init_adminq_free_arq:
594         i40e_shutdown_arq(hw);
595 init_adminq_free_asq:
596         i40e_shutdown_asq(hw);
597 init_adminq_destroy_locks:
598
599 init_adminq_exit:
600         return ret_code;
601 }
602
603 /**
604  *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
605  *  @hw:     pointer to the hardware structure
606  **/
607 i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
608 {
609         i40e_status ret_code = 0;
610
611         i40e_shutdown_asq(hw);
612         i40e_shutdown_arq(hw);
613
614         /* destroy the locks */
615
616         return ret_code;
617 }
618
/**
 *  i40e_clean_asq - cleans the send side of the Admin Queue
 *  @hw: pointer to the hw struct
 *
 *  Walks the ASQ from next_to_clean up to the position reported by the
 *  hardware head register, invoking any completion callback recorded in
 *  the per-descriptor details and zeroing each consumed descriptor and
 *  details pair.
 *
 *  returns the number of free desc
 **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	/* everything before the hardware head has been consumed by FW */
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			/* hand the callback a copy so it cannot corrupt
			 * the live ring descriptor
			 */
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
		memset((void *)details, 0,
		       sizeof(struct i40e_asq_cmd_details));
		/* advance with ring wraparound */
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}
656
657 /**
658  *  i40e_asq_done - check if FW has processed the Admin Send Queue
659  *  @hw: pointer to the hw struct
660  *
661  *  Returns true if the firmware has processed all descriptors on the
662  *  admin send queue. Returns false if there are still requests pending.
663  **/
664 bool i40e_asq_done(struct i40e_hw *hw)
665 {
666         /* AQ designers suggest use of head for better
667          * timing reliability than DD bit
668          */
669         return (rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use);
670
671 }
672
/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: optional command details (cookie, flags, async callback);
 *                may be NULL
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 *
 *  Copies @desc onto the ring (and @buff into the corresponding DMA
 *  buffer for indirect commands), bumps the tail, and - unless the
 *  details request async/postponed handling - polls for completion and
 *  copies the descriptor writeback (and response buffer) back to the
 *  caller.  The firmware return code is saved in hw->aq.asq_last_status.
 **/
i40e_status i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	i40e_status status = 0;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16  retval = 0;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_exit;
	}

	/* grab the per-descriptor details slot for this command */
	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie.  The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	mutex_lock(&hw->aq.asq_mutex);

	/* indirect buffers must fit in the pre-allocated DMA buffers */
	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	/* postpone only makes sense for async commands */
	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	/* the tail write is what hands the descriptor to firmware */
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;
		u32 delay_len = 10;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			/* ugh! delay while spin_lock */
			udelay(delay_len);
			total_delay += delay_len;
		} while (total_delay <  I40E_ASQ_CMD_TIMEOUT);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff != NULL)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);
			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = 0;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
asq_send_command_exit:
	return status;
}
845
846 /**
847  *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
848  *  @desc:     pointer to the temp descriptor (non DMA mem)
849  *  @opcode:   the opcode can be used to decide which flags to turn off or on
850  *
851  *  Fill the desc with default values
852  **/
853 void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
854                                        u16 opcode)
855 {
856         /* zero out the desc */
857         memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
858         desc->opcode = cpu_to_le16(opcode);
859         desc->flags = cpu_to_le16(I40E_AQ_FLAG_EI | I40E_AQ_FLAG_SI);
860 }
861
/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'.
 *
 *  Returns 0 on success, I40E_ERR_ADMIN_QUEUE_NO_WORK if the queue is
 *  empty, or I40E_ERR_ADMIN_QUEUE_ERROR if firmware flagged the event
 *  descriptor with I40E_AQ_FLAG_ERR (arq_last_status then holds the
 *  firmware return value).
 **/
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	i40e_status ret_code = 0;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	/* set next_to_use to head; hardware advances head as it posts
	 * events, so head == next_to_clean means nothing is waiting
	 */
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Queue is empty.\n");
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;
	i40e_debug_aq(hw,
		      I40E_DEBUG_AQ_COMMAND,
		      (void *)desc,
		      hw->aq.arq.r.arq_bi[desc_idx].va);

	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		/* firmware reported an error for this event; record it for
		 * the caller but do not copy the payload out
		 */
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	} else {
		/* good event: hand back the descriptor and as much of the
		 * message payload as the caller's buffer can hold
		 */
		memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc));
		datalen = le16_to_cpu(desc->datalen);
		/* NOTE(review): this overwrites the caller-supplied buffer
		 * size in e->msg_size with the actual event length —
		 * callers must re-set msg_size before reusing e
		 */
		e->msg_size = min(datalen, e->msg_size);
		if (e->msg_buf != NULL && (e->msg_size != 0))
			memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
			       e->msg_size);
	}

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index: this returns the
	 * descriptor (with its buffer re-armed above) to hardware
	 */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1, wrapping at the ring size */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return.  The expression counts
	 * remaining entries between ntc and ntu, accounting for wraparound.
	 */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}
951
952 void i40e_resume_aq(struct i40e_hw *hw)
953 {
954         u32 reg = 0;
955
956         /* Registers are reset after PF reset */
957         hw->aq.asq.next_to_use = 0;
958         hw->aq.asq.next_to_clean = 0;
959
960         i40e_config_asq_regs(hw);
961         reg = hw->aq.num_asq_entries | I40E_PF_ATQLEN_ATQENABLE_MASK;
962         wr32(hw, hw->aq.asq.len, reg);
963
964         hw->aq.arq.next_to_use = 0;
965         hw->aq.arq.next_to_clean = 0;
966
967         i40e_config_arq_regs(hw);
968         reg = hw->aq.num_arq_entries | I40E_PF_ATQLEN_ATQENABLE_MASK;
969         wr32(hw, hw->aq.arq.len, reg);
970 }