/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40evf.h"
#include "i40e_prototype.h"

/* busy wait delay in msec */
#define I40EVF_BUSY_WAIT_DELAY 10
#define I40EVF_BUSY_WAIT_COUNT 50
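/* i.e. poll every 10 ms, at most 50 times (~500 ms total) */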

/**
 * i40evf_send_pf_msg
 * @adapter: adapter structure
 * @op: virtual channel opcode
 * @msg: pointer to message buffer
 * @len: message length
 *
 * Send message to PF and print status on failure.
 **/
static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
                              enum i40e_virtchnl_ops op, u8 *msg, u16 len)
{
        struct i40e_hw *hw = &adapter->hw;
        i40e_status err;

        if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
                return 0; /* nothing to see here, move along */

        err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
        if (err)
                dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
                        op, err, hw->aq.asq_last_status);
        return err;
}

/**
 * i40evf_send_api_ver
 * @adapter: adapter structure
 *
 * Send API version admin queue message to the PF. The reply is not checked
 * in this function. Returns 0 if the message was successfully
 * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int i40evf_send_api_ver(struct i40evf_adapter *adapter)
{
        struct i40e_virtchnl_version_info vvi;

        vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
        vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;

        return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi,
                                  sizeof(vvi));
}

/**
 * i40evf_verify_api_ver
 * @adapter: adapter structure
 *
 * Compare API versions with the PF. Must be called after admin queue is
 * initialized. Returns 0 if API versions match, -EIO if
 * they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
 **/
int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
{
        struct i40e_virtchnl_version_info *pf_vvi;
        struct i40e_hw *hw = &adapter->hw;
        struct i40e_arq_event_info event;
        i40e_status err;

        event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
        event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
        if (!event.msg_buf) {
                err = -ENOMEM;
                goto out;
        }

        err = i40evf_clean_arq_element(hw, &event, NULL);
        if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
                goto out_alloc;

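        /* cookie_high carries the virtchnl opcode the PF is replying to,
         * cookie_low carries the PF's return status for that opcode
         */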
        err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
        if (err) {
                err = -EIO;
                goto out_alloc;
        }

        if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
            I40E_VIRTCHNL_OP_VERSION) {
                err = -EIO;
                goto out_alloc;
        }

        pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
        if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
            (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
                err = -EIO;

out_alloc:
        kfree(event.msg_buf);
out:
        return err;
}

/**
 * i40evf_send_vf_config_msg
 * @adapter: adapter structure
 *
 * Send VF configuration request admin queue message to the PF. The reply
 * is not checked in this function. Returns 0 if the message was
 * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
{
        return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
                                  NULL, 0);
}

/**
 * i40evf_get_vf_config
 * @adapter: adapter structure
 *
 * Get VF configuration from PF and populate hw structure. Must be called
 * after the admin queue is initialized. The PF's response is copied into
 * adapter->vf_res for further processing by the caller; if the admin queue
 * has no work yet, I40E_ERR_ADMIN_QUEUE_NO_WORK is returned and the caller
 * is expected to retry.
 **/
int i40evf_get_vf_config(struct i40evf_adapter *adapter)
{
        struct i40e_hw *hw = &adapter->hw;
        struct i40e_arq_event_info event;
        u16 len;
        i40e_status err;

        len = sizeof(struct i40e_virtchnl_vf_resource) +
              I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
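        /* worst case: the PF reports I40E_MAX_VF_VSI VSIs in its reply */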
        event.msg_size = len;
        event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
        if (!event.msg_buf) {
                err = -ENOMEM;
                goto out;
        }

        err = i40evf_clean_arq_element(hw, &event, NULL);
        if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
                goto out_alloc;

        err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
        if (err) {
                dev_err(&adapter->pdev->dev,
                        "%s: Error returned from PF, %d, %d\n", __func__,
                        le32_to_cpu(event.desc.cookie_high),
                        le32_to_cpu(event.desc.cookie_low));
                err = -EIO;
                goto out_alloc;
        }

        if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
            I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
                dev_err(&adapter->pdev->dev,
                        "%s: Invalid response from PF, %d, %d\n", __func__,
                        le32_to_cpu(event.desc.cookie_high),
                        le32_to_cpu(event.desc.cookie_low));
                err = -EIO;
                goto out_alloc;
        }
        memcpy(adapter->vf_res, event.msg_buf, min(event.msg_size, len));

        i40e_vf_parse_hw_config(hw, adapter->vf_res);
out_alloc:
        kfree(event.msg_buf);
out:
        return err;
}

/**
 * i40evf_configure_queues
 * @adapter: adapter structure
 *
 * Request that the PF set up our (previously allocated) queues.
 **/
void i40evf_configure_queues(struct i40evf_adapter *adapter)
{
        struct i40e_virtchnl_vsi_queue_config_info *vqci;
        struct i40e_virtchnl_queue_pair_info *vqpi;
        int pairs = adapter->vsi_res->num_queue_pairs;
        int i, len;

        if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
                        __func__, adapter->current_op);
                return;
        }
        adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
        len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
                       (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
        vqci = kzalloc(len, GFP_ATOMIC);
        if (!vqci)
                return;

        vqci->vsi_id = adapter->vsi_res->vsi_id;
        vqci->num_queue_pairs = pairs;
        vqpi = vqci->qpair;
        /* Size check is not needed here - HW max is 16 queue pairs, and we
         * can fit info for 31 of them into the AQ buffer before it overflows.
         */
        for (i = 0; i < pairs; i++) {
                vqpi->txq.vsi_id = vqci->vsi_id;
                vqpi->txq.queue_id = i;
                vqpi->txq.ring_len = adapter->tx_rings[i]->count;
                vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;
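                /* enable TX head write-back: the HW reports the ring's head
                 * pointer by DMA to the address just past the last descriptor
                 */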
                vqpi->txq.headwb_enabled = 1;
                vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
                    (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));

                vqpi->rxq.vsi_id = vqci->vsi_id;
                vqpi->rxq.queue_id = i;
                vqpi->rxq.ring_len = adapter->rx_rings[i]->count;
                vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma;
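                /* largest frame the PF should allow on this queue: MTU plus
                 * the Ethernet header, one VLAN tag and the FCS
                 */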
                vqpi->rxq.max_pkt_size = adapter->netdev->mtu
                                        + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
                vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len;
                vqpi++;
        }

        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
                           (u8 *)vqci, len);
        kfree(vqci);
        adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
}

/**
 * i40evf_enable_queues
 * @adapter: adapter structure
 *
 * Request that the PF enable all of our queues.
 **/
void i40evf_enable_queues(struct i40evf_adapter *adapter)
{
        struct i40e_virtchnl_queue_select vqs;

        if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
                        __func__, adapter->current_op);
                return;
        }
        adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
        vqs.vsi_id = adapter->vsi_res->vsi_id;
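        /* one bit per queue pair: ask the PF to enable every queue we own */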
        vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
        vqs.rx_queues = vqs.tx_queues;
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
                           (u8 *)&vqs, sizeof(vqs));
        adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
}

/**
 * i40evf_disable_queues
 * @adapter: adapter structure
 *
 * Request that the PF disable all of our queues.
 **/
void i40evf_disable_queues(struct i40evf_adapter *adapter)
{
        struct i40e_virtchnl_queue_select vqs;

        if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
                        __func__, adapter->current_op);
                return;
        }
        adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
        vqs.vsi_id = adapter->vsi_res->vsi_id;
        vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
        vqs.rx_queues = vqs.tx_queues;
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
                           (u8 *)&vqs, sizeof(vqs));
        adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
}

/**
 * i40evf_map_queues
 * @adapter: adapter structure
 *
 * Request that the PF map queues to interrupt vectors. Misc causes, including
 * admin queue, are always mapped to vector 0.
 **/
void i40evf_map_queues(struct i40evf_adapter *adapter)
{
        struct i40e_virtchnl_irq_map_info *vimi;
        int v_idx, q_vectors, len;
        struct i40e_q_vector *q_vector;

        if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
                        __func__, adapter->current_op);
                return;
        }
        adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;

        q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        len = sizeof(struct i40e_virtchnl_irq_map_info) +
              (adapter->num_msix_vectors *
                sizeof(struct i40e_virtchnl_vector_map));
        vimi = kzalloc(len, GFP_ATOMIC);
        if (!vimi)
                return;

        vimi->num_vectors = adapter->num_msix_vectors;
        /* Queue vectors first */
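        /* vector 0 is reserved for the misc/AdminQ interrupt (mapped below),
         * so queue vectors are offset by NONQ_VECS
         */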
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                q_vector = adapter->q_vector[v_idx];
                vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
                vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS;
                vimi->vecmap[v_idx].txq_map = q_vector->ring_mask;
                vimi->vecmap[v_idx].rxq_map = q_vector->ring_mask;
        }
        /* Misc vector last - this is only for AdminQ messages */
        vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
        vimi->vecmap[v_idx].vector_id = 0;
        vimi->vecmap[v_idx].txq_map = 0;
        vimi->vecmap[v_idx].rxq_map = 0;

        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
                           (u8 *)vimi, len);
        kfree(vimi);
        adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
}

/**
 * i40evf_add_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF add the MAC filters flagged for addition on the
 * adapter's filter list.
 **/
void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
{
        struct i40e_virtchnl_ether_addr_list *veal;
        int len, i = 0, count = 0;
        struct i40evf_mac_filter *f;

        if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
                        __func__, adapter->current_op);
                return;
        }
        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                if (f->add)
                        count++;
        }
        if (!count) {
                adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
                return;
        }
        adapter->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;

        len = sizeof(struct i40e_virtchnl_ether_addr_list) +
              (count * sizeof(struct i40e_virtchnl_ether_addr));
        if (len > I40EVF_MAX_AQ_BUF_SIZE) {
                dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
                         __func__);
                count = (I40EVF_MAX_AQ_BUF_SIZE -
                         sizeof(struct i40e_virtchnl_ether_addr_list)) /
                        sizeof(struct i40e_virtchnl_ether_addr);
                len = I40EVF_MAX_AQ_BUF_SIZE;
        }

        veal = kzalloc(len, GFP_ATOMIC);
        if (!veal)
                return;

        veal->vsi_id = adapter->vsi_res->vsi_id;
        veal->num_elements = count;
        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                /* don't write past the buffer if count was truncated above */
                if (f->add && i < count) {
                        ether_addr_copy(veal->list[i].addr, f->macaddr);
                        i++;
                        f->add = false;
                }
        }
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
                           (u8 *)veal, len);
        kfree(veal);
        adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
}

/**
 * i40evf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove the MAC filters flagged for removal on the
 * adapter's filter list.
 **/
void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
{
        struct i40e_virtchnl_ether_addr_list *veal;
        struct i40evf_mac_filter *f, *ftmp;
        int len, i = 0, count = 0;

        if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
                        __func__, adapter->current_op);
                return;
        }
        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                if (f->remove)
                        count++;
        }
        if (!count) {
                adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
                return;
        }
        adapter->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;

        len = sizeof(struct i40e_virtchnl_ether_addr_list) +
              (count * sizeof(struct i40e_virtchnl_ether_addr));
        if (len > I40EVF_MAX_AQ_BUF_SIZE) {
                dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
                         __func__);
                count = (I40EVF_MAX_AQ_BUF_SIZE -
                         sizeof(struct i40e_virtchnl_ether_addr_list)) /
                        sizeof(struct i40e_virtchnl_ether_addr);
                len = I40EVF_MAX_AQ_BUF_SIZE;
        }
        veal = kzalloc(len, GFP_ATOMIC);
        if (!veal)
                return;

        veal->vsi_id = adapter->vsi_res->vsi_id;
        veal->num_elements = count;
        list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
                /* don't write past the buffer if count was truncated above */
                if (f->remove && i < count) {
                        ether_addr_copy(veal->list[i].addr, f->macaddr);
                        i++;
                        list_del(&f->list);
                        kfree(f);
                }
        }
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
                           (u8 *)veal, len);
        kfree(veal);
        adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
}

/**
 * i40evf_add_vlans
 * @adapter: adapter structure
 *
 * Request that the PF add the VLAN filters flagged for addition to our VSI.
 **/
void i40evf_add_vlans(struct i40evf_adapter *adapter)
{
        struct i40e_virtchnl_vlan_filter_list *vvfl;
        int len, i = 0, count = 0;
        struct i40evf_vlan_filter *f;

        if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
                        __func__, adapter->current_op);
                return;
        }

        list_for_each_entry(f, &adapter->vlan_filter_list, list) {
                if (f->add)
                        count++;
        }
        if (!count) {
                adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
                return;
        }
        adapter->current_op = I40E_VIRTCHNL_OP_ADD_VLAN;

        len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
              (count * sizeof(u16));
        if (len > I40EVF_MAX_AQ_BUF_SIZE) {
                dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
                         __func__);
                count = (I40EVF_MAX_AQ_BUF_SIZE -
                         sizeof(struct i40e_virtchnl_vlan_filter_list)) /
                        sizeof(u16);
                len = I40EVF_MAX_AQ_BUF_SIZE;
        }
        vvfl = kzalloc(len, GFP_ATOMIC);
        if (!vvfl)
                return;

        vvfl->vsi_id = adapter->vsi_res->vsi_id;
        vvfl->num_elements = count;
        list_for_each_entry(f, &adapter->vlan_filter_list, list) {
                /* don't write past the buffer if count was truncated above */
                if (f->add && i < count) {
                        vvfl->vlan_id[i] = f->vlan;
                        i++;
                        f->add = false;
                }
        }
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
        kfree(vvfl);
        adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
}

/**
 * i40evf_del_vlans
 * @adapter: adapter structure
 *
 * Request that the PF remove the VLAN filters flagged for removal from our
 * VSI.
 **/
void i40evf_del_vlans(struct i40evf_adapter *adapter)
{
        struct i40e_virtchnl_vlan_filter_list *vvfl;
        struct i40evf_vlan_filter *f, *ftmp;
        int len, i = 0, count = 0;

        if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
                        __func__, adapter->current_op);
                return;
        }

        list_for_each_entry(f, &adapter->vlan_filter_list, list) {
                if (f->remove)
                        count++;
        }
        if (!count) {
                adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
                return;
        }
        adapter->current_op = I40E_VIRTCHNL_OP_DEL_VLAN;

        len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
              (count * sizeof(u16));
        if (len > I40EVF_MAX_AQ_BUF_SIZE) {
                dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
                         __func__);
                count = (I40EVF_MAX_AQ_BUF_SIZE -
                         sizeof(struct i40e_virtchnl_vlan_filter_list)) /
                        sizeof(u16);
                len = I40EVF_MAX_AQ_BUF_SIZE;
        }
        vvfl = kzalloc(len, GFP_ATOMIC);
        if (!vvfl)
                return;

        vvfl->vsi_id = adapter->vsi_res->vsi_id;
        vvfl->num_elements = count;
        list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
                /* don't write past the buffer if count was truncated above */
                if (f->remove && i < count) {
                        vvfl->vlan_id[i] = f->vlan;
                        i++;
                        list_del(&f->list);
                        kfree(f);
                }
        }
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
        kfree(vvfl);
        adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
}

/**
 * i40evf_set_promiscuous
 * @adapter: adapter structure
 * @flags: bitmask to control unicast/multicast promiscuous mode.
 *
 * Request that the PF enable promiscuous mode for our VSI.
 **/
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
{
        struct i40e_virtchnl_promisc_info vpi;

        if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
                        __func__, adapter->current_op);
                return;
        }
        adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
        vpi.vsi_id = adapter->vsi_res->vsi_id;
        vpi.flags = flags;
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
                           (u8 *)&vpi, sizeof(vpi));
}

/**
 * i40evf_request_stats
 * @adapter: adapter structure
 *
 * Request VSI statistics from PF.
 **/
void i40evf_request_stats(struct i40evf_adapter *adapter)
{
        struct i40e_virtchnl_queue_select vqs;

        if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
                /* no error message, this isn't crucial */
                return;
        }
        adapter->current_op = I40E_VIRTCHNL_OP_GET_STATS;
        vqs.vsi_id = adapter->vsi_res->vsi_id;
        /* queue maps are ignored for this message - only the vsi is used */
        if (i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_STATS,
                               (u8 *)&vqs, sizeof(vqs)))
                /* if the request failed, don't lock out others */
                adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}

/**
 * i40evf_request_reset
 * @adapter: adapter structure
 *
 * Request that the PF reset this VF. No response is expected.
 **/
void i40evf_request_reset(struct i40evf_adapter *adapter)
{
        /* Don't check CURRENT_OP - this is always higher priority */
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
        adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}

/**
 * i40evf_virtchnl_completion
 * @adapter: adapter structure
 * @v_opcode: opcode sent by PF
 * @v_retval: retval sent by PF
 * @msg: message sent by PF
 * @msglen: message length
 *
 * Asynchronous completion function for admin queue messages. Rather than busy
 * wait, we fire off our requests and assume that no errors will be returned.
 * This function handles the reply messages.
 **/
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                                enum i40e_virtchnl_ops v_opcode,
                                i40e_status v_retval,
                                u8 *msg, u16 msglen)
{
        struct net_device *netdev = adapter->netdev;

        if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
                struct i40e_virtchnl_pf_event *vpe =
                        (struct i40e_virtchnl_pf_event *)msg;
                switch (vpe->event) {
                case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
                        adapter->link_up =
                                vpe->event_data.link_event.link_status;
                        if (adapter->link_up && !netif_carrier_ok(netdev)) {
                                dev_info(&adapter->pdev->dev, "NIC Link is Up\n");
                                netif_carrier_on(netdev);
                                netif_tx_wake_all_queues(netdev);
                        } else if (!adapter->link_up) {
                                dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
                                netif_carrier_off(netdev);
                                netif_tx_stop_all_queues(netdev);
                        }
                        break;
                case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
                        dev_info(&adapter->pdev->dev, "PF reset warning received\n");
                        if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
                                adapter->flags |= I40EVF_FLAG_RESET_PENDING;
                                dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
                                schedule_work(&adapter->reset_task);
                        }
                        break;
                default:
                        dev_err(&adapter->pdev->dev,
                                "%s: Unknown event %d from pf\n",
                                __func__, vpe->event);
                        break;
                }
                return;
        }
        if (v_opcode != adapter->current_op) {
                dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d\n",
                        __func__, adapter->current_op, v_opcode);
                /* We're probably completely screwed at this point, but clear
                 * the current op and try to carry on....
                 */
                adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
                return;
        }
        if (v_retval) {
                dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
                        __func__, v_retval, v_opcode);
        }
        switch (v_opcode) {
        case I40E_VIRTCHNL_OP_GET_STATS: {
                struct i40e_eth_stats *stats =
                        (struct i40e_eth_stats *)msg;
                adapter->net_stats.rx_packets = stats->rx_unicast +
                                                 stats->rx_multicast +
                                                 stats->rx_broadcast;
                adapter->net_stats.tx_packets = stats->tx_unicast +
                                                 stats->tx_multicast +
                                                 stats->tx_broadcast;
                adapter->net_stats.rx_bytes = stats->rx_bytes;
                adapter->net_stats.tx_bytes = stats->tx_bytes;
                adapter->net_stats.tx_errors = stats->tx_errors;
                adapter->net_stats.rx_dropped = stats->rx_discards;
                adapter->net_stats.tx_dropped = stats->tx_discards;
                adapter->current_stats = *stats;
                }
                break;
        case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
                adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_MAC_FILTER);
                break;
        case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
                adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_MAC_FILTER);
                break;
        case I40E_VIRTCHNL_OP_ADD_VLAN:
                adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_VLAN_FILTER);
                break;
        case I40E_VIRTCHNL_OP_DEL_VLAN:
                adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_VLAN_FILTER);
                break;
        case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
                adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ENABLE_QUEUES);
                /* enable transmits */
                i40evf_irq_enable(adapter, true);
                netif_tx_start_all_queues(adapter->netdev);
                netif_carrier_on(adapter->netdev);
                break;
        case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
                adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DISABLE_QUEUES);
                break;
        case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
                adapter->aq_pending &= ~(I40EVF_FLAG_AQ_CONFIGURE_QUEUES);
                break;
        case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
                adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
                break;
        default:
                dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF\n",
                         __func__, v_opcode);
                break;
        } /* switch v_opcode */
        adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}