/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
    return wrb->payload.embedded_payload;
}
static void be_mcc_notify(struct be_adapter *adapter)
{
    struct be_queue_info *mccq = &adapter->mcc_obj.q;
    u32 val = 0;

    if (be_error(adapter))
        return;

    val |= mccq->id & DB_MCCQ_RING_ID_MASK;
    val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

    wmb();
    iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}
/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
    if (compl->flags != 0) {
        compl->flags = le32_to_cpu(compl->flags);
        BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
        return true;
    } else {
        return false;
    }
}
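/* Editor's note (not in the original source): the zero test works in either
 * byte order because zero is invariant under byte swapping. A new
 * little-endian entry with only the valid bit set reads back byte-swapped
 * but still non-zero on a big-endian host, so "flags != 0" detects a new
 * entry either way; only then is the word converted to host order.
 */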
/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
    compl->flags = 0;
}
static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
    unsigned long addr;

    addr = tag1;
    addr = ((addr << 16) << 16) | tag0;
    return (void *)addr;
}
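/* Editor's note (not in the original source): the wrb tags carry the virtual
 * address of the request header (see be_wrb_cmd_hdr_prepare), split into two
 * 32-bit halves. The double 16-bit shift instead of a single "<< 32" avoids
 * undefined behaviour on 32-bit hosts, where unsigned long is 32 bits wide:
 * there the result is 0, which correctly discards tag1, since a 32-bit
 * pointer fits entirely in tag0.
 */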
static int be_mcc_compl_process(struct be_adapter *adapter,
                struct be_mcc_compl *compl)
{
    u16 compl_status, extd_status;
    struct be_cmd_resp_hdr *resp_hdr;
    u8 opcode = 0, subsystem = 0;

    /* Just swap the status to host endian; mcc tag is opaquely copied
     * from mcc_wrb */
    be_dws_le_to_cpu(compl, 4);

    compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
                CQE_STATUS_COMPL_MASK;

    resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);

    if (resp_hdr) {
        opcode = resp_hdr->opcode;
        subsystem = resp_hdr->subsystem;
    }

    if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
         (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
        (subsystem == CMD_SUBSYSTEM_COMMON)) {
        adapter->flash_status = compl_status;
        complete(&adapter->flash_compl);
    }

    if (compl_status == MCC_STATUS_SUCCESS) {
        if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
             (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
            (subsystem == CMD_SUBSYSTEM_ETH)) {
            be_parse_stats(adapter);
            adapter->stats_cmd_sent = false;
        }
        if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
            subsystem == CMD_SUBSYSTEM_COMMON) {
            struct be_cmd_resp_get_cntl_addnl_attribs *resp =
                (void *)resp_hdr;
            adapter->drv_stats.be_on_die_temperature =
                resp->on_die_temperature;
        }
    } else {
        if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
            adapter->be_get_temp_freq = 0;

        if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
            compl_status == MCC_STATUS_ILLEGAL_REQUEST)
            goto done;

        if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
            dev_warn(&adapter->pdev->dev,
                 "opcode %d-%d is not permitted\n",
                 opcode, subsystem);
        } else {
            extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                    CQE_STATUS_EXTD_MASK;
            dev_err(&adapter->pdev->dev,
                "opcode %d-%d failed:status %d-%d\n",
                opcode, subsystem, compl_status, extd_status);
        }
    }
done:
    return compl_status;
}
/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
        struct be_async_event_link_state *evt)
{
    /* When link status changes, link speed must be re-queried from FW */
    adapter->phy.link_speed = -1;

    /* Ignore physical link event */
    if (lancer_chip(adapter) &&
        !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
        return;

    /* For the initial link status do not rely on the ASYNC event as
     * it may not be received in some cases.
     */
    if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
        be_link_status_update(adapter, evt->port_link_status);
}
/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
        struct be_async_event_grp5_cos_priority *evt)
{
    if (evt->valid) {
        adapter->vlan_prio_bmap = evt->available_priority_bmap;
        adapter->recommended_prio &= ~VLAN_PRIO_MASK;
        adapter->recommended_prio =
            evt->reco_default_priority << VLAN_PRIO_SHIFT;
    }
}
/* Grp5 QOS Speed evt */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
        struct be_async_event_grp5_qos_link_speed *evt)
{
    if (evt->physical_port == adapter->port_num) {
        /* qos_link_speed is in units of 10 Mbps */
        adapter->phy.link_speed = evt->qos_link_speed * 10;
    }
}
/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
        struct be_async_event_grp5_pvid_state *evt)
{
    if (evt->enabled)
        adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
    else
        adapter->pvid = 0;
}
static void be_async_grp5_evt_process(struct be_adapter *adapter,
        u32 trailer, struct be_mcc_compl *evt)
{
    u8 event_type = 0;

    event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
        ASYNC_TRAILER_EVENT_TYPE_MASK;

    switch (event_type) {
    case ASYNC_EVENT_COS_PRIORITY:
        be_async_grp5_cos_priority_process(adapter,
            (struct be_async_event_grp5_cos_priority *)evt);
        break;
    case ASYNC_EVENT_QOS_SPEED:
        be_async_grp5_qos_speed_process(adapter,
            (struct be_async_event_grp5_qos_link_speed *)evt);
        break;
    case ASYNC_EVENT_PVID_STATE:
        be_async_grp5_pvid_state_process(adapter,
            (struct be_async_event_grp5_pvid_state *)evt);
        break;
    default:
        dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
        break;
    }
}
static inline bool is_link_state_evt(u32 trailer)
{
    return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
        ASYNC_TRAILER_EVENT_CODE_MASK) ==
                ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 trailer)
{
    return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
        ASYNC_TRAILER_EVENT_CODE_MASK) ==
                ASYNC_EVENT_CODE_GRP_5);
}
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
    struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
    struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

    if (be_mcc_compl_is_new(compl)) {
        queue_tail_inc(mcc_cq);
        return compl;
    }
    return NULL;
}
void be_async_mcc_enable(struct be_adapter *adapter)
{
    spin_lock_bh(&adapter->mcc_cq_lock);

    be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
    adapter->mcc_obj.rearm_cq = true;

    spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
    adapter->mcc_obj.rearm_cq = false;
}
int be_process_mcc(struct be_adapter *adapter)
{
    struct be_mcc_compl *compl;
    int num = 0, status = 0;
    struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

    spin_lock(&adapter->mcc_cq_lock);
    while ((compl = be_mcc_compl_get(adapter))) {
        if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
            /* Interpret flags as an async trailer */
            if (is_link_state_evt(compl->flags))
                be_async_link_state_process(adapter,
                    (struct be_async_event_link_state *) compl);
            else if (is_grp5_evt(compl->flags))
                be_async_grp5_evt_process(adapter,
                    compl->flags, compl);
        } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
            status = be_mcc_compl_process(adapter, compl);
            atomic_dec(&mcc_obj->q.used);
        }
        be_mcc_compl_use(compl);
        num++;
    }

    if (num)
        be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

    spin_unlock(&adapter->mcc_cq_lock);
    return status;
}
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout 120000 /* 12s timeout */
    int i, status = 0;
    struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

    for (i = 0; i < mcc_timeout; i++) {
        if (be_error(adapter))
            return -EIO;

        local_bh_disable();
        status = be_process_mcc(adapter);
        local_bh_enable();

        if (atomic_read(&mcc_obj->q.used) == 0)
            break;
        udelay(100);
    }
    if (i == mcc_timeout) {
        dev_err(&adapter->pdev->dev, "FW not responding\n");
        adapter->fw_timeout = true;
        return -EIO;
    }
    return status;
}
/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
    int status;
    struct be_mcc_wrb *wrb;
    struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
    u16 index = mcc_obj->q.head;
    struct be_cmd_resp_hdr *resp;

    index_dec(&index, mcc_obj->q.len);
    wrb = queue_index_node(&mcc_obj->q, index);

    resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

    be_mcc_notify(adapter);

    status = be_mcc_wait_compl(adapter);
    if (status == -EIO)
        goto out;

    status = resp->status;
out:
    return status;
}
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
    int msecs = 0;
    u32 ready;

    do {
        if (be_error(adapter))
            return -EIO;

        ready = ioread32(db);
        if (ready == 0xffffffff)
            return -1;

        ready &= MPU_MAILBOX_DB_RDY_MASK;
        if (ready)
            break;

        if (msecs > 4000) {
            dev_err(&adapter->pdev->dev, "FW not responding\n");
            adapter->fw_timeout = true;
            be_detect_error(adapter);
            return -1;
        }

        msleep(1);
        msecs++;
    } while (true);

    return 0;
}
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
    int status;
    u32 val = 0;
    void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
    struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
    struct be_mcc_mailbox *mbox = mbox_mem->va;
    struct be_mcc_compl *compl = &mbox->compl;

    /* wait for ready to be set */
    status = be_mbox_db_ready_wait(adapter, db);
    if (status != 0)
        return status;

    val |= MPU_MAILBOX_DB_HI_MASK;
    /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
    val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
    iowrite32(val, db);

    /* wait for ready to be set */
    status = be_mbox_db_ready_wait(adapter, db);
    if (status != 0)
        return status;

    val = 0;
    /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
    val |= (u32)(mbox_mem->dma >> 4) << 2;
    iowrite32(val, db);

    status = be_mbox_db_ready_wait(adapter, db);
    if (status != 0)
        return status;

    /* A cq entry has been made now */
    if (be_mcc_compl_is_new(compl)) {
        status = be_mcc_compl_process(adapter, &mbox->compl);
        be_mcc_compl_use(compl);
        if (status)
            return status;
    } else {
        dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
        return -1;
    }
    return 0;
}
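/* Editor's note (not in the original source): the 30-bit doorbell payload at
 * bits 2-31 cannot hold a full DMA address, so it is delivered in two
 * writes: first the high half (address bits 34-63, flagged by
 * MPU_MAILBOX_DB_HI_MASK), then the low half (address bits 4-33). Address
 * bits 0-3 are never transferred, which is why the mailbox memory must be
 * 16-byte aligned.
 */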
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
    u32 sem;

    if (lancer_chip(adapter))
        sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
    else
        sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

    *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
    if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
        return -1;
    else
        return 0;
}
int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
    u32 sliport_status;
    int status = 0, i;

    for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
        sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
        if (sliport_status & SLIPORT_STATUS_RDY_MASK)
            break;

        msleep(1000);
    }

    if (i == SLIPORT_READY_TIMEOUT)
        status = -1;

    return status;
}
int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
    int status;
    u32 sliport_status, err, reset_needed;
    status = lancer_wait_ready(adapter);
    if (!status) {
        sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
        err = sliport_status & SLIPORT_STATUS_ERR_MASK;
        reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
        if (err && reset_needed) {
            iowrite32(SLI_PORT_CONTROL_IP_MASK,
                  adapter->db + SLIPORT_CONTROL_OFFSET);

            /* check adapter has corrected the error */
            status = lancer_wait_ready(adapter);
            sliport_status = ioread32(adapter->db +
                          SLIPORT_STATUS_OFFSET);
            sliport_status &= (SLIPORT_STATUS_ERR_MASK |
                        SLIPORT_STATUS_RN_MASK);
            if (status || sliport_status)
                status = -1;
        } else if (err || reset_needed) {
            status = -1;
        }
    }
    return status;
}
int be_fw_wait_ready(struct be_adapter *adapter)
{
    u16 stage;
    int status, timeout = 0;
    struct device *dev = &adapter->pdev->dev;

    if (lancer_chip(adapter)) {
        status = lancer_wait_ready(adapter);
        return status;
    }

    do {
        status = be_POST_stage_get(adapter, &stage);
        if (status) {
            dev_err(dev, "POST error; stage=0x%x\n", stage);
            return -1;
        } else if (stage != POST_STAGE_ARMFW_RDY) {
            if (msleep_interruptible(2000)) {
                dev_err(dev, "Waiting for POST aborted\n");
                return -EINTR;
            }
            timeout += 2;
        } else {
            return 0;
        }
    } while (timeout < 60);

    dev_err(dev, "POST timeout; stage=0x%x\n", stage);
    return -1;
}
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
    return &wrb->payload.sgl[0];
}
/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
                u8 subsystem, u8 opcode, int cmd_len,
                struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
{
    struct be_sge *sge;
    unsigned long addr = (unsigned long)req_hdr;
    u64 req_addr = addr;

    req_hdr->opcode = opcode;
    req_hdr->subsystem = subsystem;
    req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
    req_hdr->version = 0;

    wrb->tag0 = req_addr & 0xFFFFFFFF;
    wrb->tag1 = upper_32_bits(req_addr);

    wrb->payload_length = cmd_len;
    if (mem) {
        wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
            MCC_WRB_SGE_CNT_SHIFT;
        sge = nonembedded_sgl(wrb);
        sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
        sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(mem->size);
    } else
        wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
    be_dws_cpu_to_le(wrb, 8);
}
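/* Editor's note (not in the original source): stashing the request header's
 * virtual address in tag0/tag1 is what lets the completion path
 * (be_mcc_compl_process -> be_decode_resp_hdr) find the response again:
 * firmware writes the response over the request in the same buffer, so the
 * saved pointer doubles as the response pointer for both embedded and
 * non-embedded commands.
 */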
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
            struct be_dma_mem *mem)
{
    int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
    u64 dma = (u64)mem->dma;

    for (i = 0; i < buf_pages; i++) {
        pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
        pages[i].hi = cpu_to_le32(upper_32_bits(dma));
        dma += PAGE_SIZE_4K;
    }
}
/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
    const u32 round = 10;
    u32 multiplier;

    if (usec_delay == 0)
        multiplier = 0;
    else {
        u32 interrupt_rate = 1000000 / usec_delay;
        /* Max delay, corresponding to the lowest interrupt rate */
        if (interrupt_rate == 0)
            multiplier = 1023;
        else {
            multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
            multiplier /= interrupt_rate;
            /* Round the multiplier to the closest value.*/
            multiplier = (multiplier + round/2) / round;
            multiplier = min(multiplier, (u32)1023);
        }
    }
    return multiplier;
}
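/* Editor's worked example (not in the original source): for usec_delay = 96,
 * interrupt_rate = 1000000 / 96 = 10416; multiplier = (651042 - 10416) * 10
 * / 10416 = 615 tenths, which rounds to (615 + 5) / 10 = 62 -- well under
 * the 1023 cap. A delay of 0 us requests the maximum interrupt rate
 * (multiplier 0).
 */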
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
    struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
    struct be_mcc_wrb *wrb
        = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
    memset(wrb, 0, sizeof(*wrb));
    return wrb;
}
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
    struct be_queue_info *mccq = &adapter->mcc_obj.q;
    struct be_mcc_wrb *wrb;

    if (atomic_read(&mccq->used) >= mccq->len) {
        dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
        return NULL;
    }

    wrb = queue_head_node(mccq);
    queue_head_inc(mccq);
    atomic_inc(&mccq->used);
    memset(wrb, 0, sizeof(*wrb));
    return wrb;
}
/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
    u8 *wrb;
    int status;

    if (lancer_chip(adapter))
        return 0;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = (u8 *)wrb_from_mbox(adapter);
    *wrb++ = 0xFF;
    *wrb++ = 0x12;
    *wrb++ = 0x34;
    *wrb++ = 0xFF;
    *wrb++ = 0xFF;
    *wrb++ = 0x56;
    *wrb++ = 0x78;
    *wrb = 0xFF;

    status = be_mbox_notify_wait(adapter);

    mutex_unlock(&adapter->mbox_lock);
    return status;
}
/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
    u8 *wrb;
    int status;

    if (lancer_chip(adapter))
        return 0;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = (u8 *)wrb_from_mbox(adapter);
    *wrb++ = 0xFF;
    *wrb++ = 0xAA;
    *wrb++ = 0xBB;
    *wrb++ = 0xFF;
    *wrb++ = 0xFF;
    *wrb++ = 0xCC;
    *wrb++ = 0xDD;
    *wrb = 0xFF;

    status = be_mbox_notify_wait(adapter);

    mutex_unlock(&adapter->mbox_lock);
    return status;
}
int be_cmd_eq_create(struct be_adapter *adapter,
        struct be_queue_info *eq, int eq_delay)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_eq_create *req;
    struct be_dma_mem *q_mem = &eq->dma_mem;
    int status;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);

    req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

    AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
    /* 4byte eqe*/
    AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
    AMAP_SET_BITS(struct amap_eq_context, count, req->context,
            __ilog2_u32(eq->len/256));
    AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
            eq_delay_to_mult(eq_delay));
    be_dws_cpu_to_le(req->context, sizeof(req->context));

    be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

    status = be_mbox_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
        eq->id = le16_to_cpu(resp->eq_id);
        eq->created = true;
    }

    mutex_unlock(&adapter->mbox_lock);
    return status;
}
/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
            u8 type, bool permanent, u32 if_handle, u32 pmac_id)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_mac_query *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
    req->type = type;
    if (permanent) {
        req->permanent = 1;
    } else {
        req->if_id = cpu_to_le16((u16) if_handle);
        req->pmac_id = cpu_to_le32(pmac_id);
        req->permanent = 0;
    }

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
        memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
        u32 if_id, u32 *pmac_id, u32 domain)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_pmac_add *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);

    req->hdr.domain = domain;
    req->if_id = cpu_to_le32(if_id);
    memcpy(req->mac_address, mac_addr, ETH_ALEN);

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
        *pmac_id = le32_to_cpu(resp->pmac_id);
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);

    if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
        status = -EPERM;

    return status;
}
/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_pmac_del *req;
    int status;

    if (pmac_id == -1)
        return 0;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);

    req->hdr.domain = dom;
    req->if_id = cpu_to_le32(if_id);
    req->pmac_id = cpu_to_le32(pmac_id);

    status = be_mcc_notify_wait(adapter);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
        struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_cq_create *req;
    struct be_dma_mem *q_mem = &cq->dma_mem;
    void *ctxt;
    int status;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    req = embedded_payload(wrb);
    ctxt = &req->context;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);

    req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
    if (lancer_chip(adapter)) {
        req->hdr.version = 2;
        req->page_size = 1; /* 1 for 4K */
        AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
                                no_delay);
        AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
                        __ilog2_u32(cq->len/256));
        AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
                                ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
                                ctxt, eq->id);
    } else {
        AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
                                coalesce_wm);
        AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
                                ctxt, no_delay);
        AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
                        __ilog2_u32(cq->len/256));
        AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
    }

    be_dws_cpu_to_le(ctxt, sizeof(req->context));

    be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

    status = be_mbox_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
        cq->id = le16_to_cpu(resp->cq_id);
        cq->created = true;
    }

    mutex_unlock(&adapter->mbox_lock);

    return status;
}
static u32 be_encoded_q_len(int q_len)
{
    u32 len_encoded = fls(q_len); /* log2(len) + 1 */
    if (len_encoded == 16)
        len_encoded = 0;
    return len_encoded;
}
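/* Editor's worked example (not in the original source): ring lengths are
 * powers of two, so fls() yields log2(len) + 1: a 256-entry ring encodes as
 * 9 and a 1024-entry ring as 11. The maximum length of 32768 would encode
 * as 16, which the hardware represents as 0.
 */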
int be_cmd_mccq_ext_create(struct be_adapter *adapter,
            struct be_queue_info *mccq,
            struct be_queue_info *cq)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_mcc_ext_create *req;
    struct be_dma_mem *q_mem = &mccq->dma_mem;
    void *ctxt;
    int status;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    req = embedded_payload(wrb);
    ctxt = &req->context;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);

    req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
    if (lancer_chip(adapter)) {
        req->hdr.version = 1;
        req->cq_id = cpu_to_le16(cq->id);

        AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
                        be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
                                ctxt, cq->id);
        AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
                                ctxt, 1);
    } else {
        AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
                        be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
    }

    /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
    req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
    be_dws_cpu_to_le(ctxt, sizeof(req->context));

    be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

    status = be_mbox_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
        mccq->id = le16_to_cpu(resp->id);
        mccq->created = true;
    }
    mutex_unlock(&adapter->mbox_lock);

    return status;
}
int be_cmd_mccq_org_create(struct be_adapter *adapter,
            struct be_queue_info *mccq,
            struct be_queue_info *cq)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_mcc_create *req;
    struct be_dma_mem *q_mem = &mccq->dma_mem;
    void *ctxt;
    int status;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    req = embedded_payload(wrb);
    ctxt = &req->context;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);

    req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

    AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
    AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
            be_encoded_q_len(mccq->len));
    AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

    be_dws_cpu_to_le(ctxt, sizeof(req->context));

    be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

    status = be_mbox_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
        mccq->id = le16_to_cpu(resp->id);
        mccq->created = true;
    }

    mutex_unlock(&adapter->mbox_lock);
    return status;
}
int be_cmd_mccq_create(struct be_adapter *adapter,
            struct be_queue_info *mccq,
            struct be_queue_info *cq)
{
    int status;

    status = be_cmd_mccq_ext_create(adapter, mccq, cq);
    if (status && !lancer_chip(adapter)) {
        dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
            "or newer to avoid conflicting priorities between NIC "
            "and FCoE traffic");
        status = be_cmd_mccq_org_create(adapter, mccq, cq);
    }
    return status;
}
int be_cmd_txq_create(struct be_adapter *adapter,
            struct be_queue_info *txq,
            struct be_queue_info *cq)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_eth_tx_create *req;
    struct be_dma_mem *q_mem = &txq->dma_mem;
    void *ctxt;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }

    req = embedded_payload(wrb);
    ctxt = &req->context;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
        OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);

    if (lancer_chip(adapter)) {
        req->hdr.version = 1;
        AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
                    adapter->if_handle);
    }

    req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
    req->ulp_num = BE_ULP1_NUM;
    req->type = BE_ETH_TX_RING_TYPE_STANDARD;

    AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
        be_encoded_q_len(txq->len));
    AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
    AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

    be_dws_cpu_to_le(ctxt, sizeof(req->context));

    be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
        txq->id = le16_to_cpu(resp->cid);
        txq->created = true;
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);

    return status;
}
/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
        struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
        u32 if_id, u32 rss, u8 *rss_id)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_eth_rx_create *req;
    struct be_dma_mem *q_mem = &rxq->dma_mem;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
        OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

    req->cq_id = cpu_to_le16(cq_id);
    req->frag_size = fls(frag_size) - 1;
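    /* Editor's note (not in the original source): frag_size must be a
     * power of two; fls(frag_size) - 1 is its log2, e.g. a 2048-byte
     * fragment encodes as 11.
     */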
    req->num_pages = 2;
    be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
    req->interface_id = cpu_to_le32(if_id);
    req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
    req->rss_queue = cpu_to_le32(rss);

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
        rxq->id = le16_to_cpu(resp->id);
        rxq->created = true;
        *rss_id = resp->rss_id;
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
        int queue_type)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_q_destroy *req;
    u8 subsys = 0, opcode = 0;
    int status;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    req = embedded_payload(wrb);

    switch (queue_type) {
    case QTYPE_EQ:
        subsys = CMD_SUBSYSTEM_COMMON;
        opcode = OPCODE_COMMON_EQ_DESTROY;
        break;
    case QTYPE_CQ:
        subsys = CMD_SUBSYSTEM_COMMON;
        opcode = OPCODE_COMMON_CQ_DESTROY;
        break;
    case QTYPE_TXQ:
        subsys = CMD_SUBSYSTEM_ETH;
        opcode = OPCODE_ETH_TX_DESTROY;
        break;
    case QTYPE_RXQ:
        subsys = CMD_SUBSYSTEM_ETH;
        opcode = OPCODE_ETH_RX_DESTROY;
        break;
    case QTYPE_MCCQ:
        subsys = CMD_SUBSYSTEM_COMMON;
        opcode = OPCODE_COMMON_MCC_DESTROY;
        break;
    default:
        BUG();
    }

    be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
                NULL);
    req->id = cpu_to_le16(q->id);

    status = be_mbox_notify_wait(adapter);
    if (!status)
        q->created = false;

    mutex_unlock(&adapter->mbox_lock);
    return status;
}
/* Uses MCC */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_q_destroy *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
        OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
    req->id = cpu_to_le16(q->id);

    status = be_mcc_notify_wait(adapter);
    if (!status)
        q->created = false;

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Create an rx filtering policy configuration on an i/f
 * Uses MCCQ
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
        u32 *if_handle, u32 domain)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_if_create *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
    req->hdr.domain = domain;
    req->capability_flags = cpu_to_le32(cap_flags);
    req->enable_flags = cpu_to_le32(en_flags);

    req->pmac_invalid = true;

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
        *if_handle = le32_to_cpu(resp->interface_id);
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses MCCQ */
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_if_destroy *req;
    int status;

    if (interface_id == -1)
        return 0;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
    req->hdr.domain = domain;
    req->interface_id = cpu_to_le32(interface_id);

    status = be_mcc_notify_wait(adapter);
err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_hdr *hdr;
    int status = 0;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    hdr = nonemb_cmd->va;

    be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
        OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);

    if (adapter->generation == BE_GEN3)
        hdr->version = 1;

    be_mcc_notify(adapter);
    adapter->stats_cmd_sent = true;

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
                struct be_dma_mem *nonemb_cmd)
{
    struct be_mcc_wrb *wrb;
    struct lancer_cmd_req_pport_stats *req;
    int status = 0;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = nonemb_cmd->va;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
            OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
            nonemb_cmd);

    req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
    req->cmd_params.params.reset_stats = 0;

    be_mcc_notify(adapter);
    adapter->stats_cmd_sent = true;

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
                 u16 *link_speed, u8 *link_status, u32 dom)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_link_status *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    if (link_status)
        *link_status = LINK_DOWN;

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);

    if (adapter->generation == BE_GEN3 || lancer_chip(adapter))
        req->hdr.version = 1;

    req->hdr.domain = dom;

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
        if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
            if (link_speed)
                *link_speed = le16_to_cpu(resp->link_speed);
            if (mac_speed)
                *mac_speed = resp->mac_speed;
        }
        if (link_status)
            *link_status = resp->logical_link_status;
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses asynchronous mcc; the result is picked up in be_mcc_compl_process() */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_get_cntl_addnl_attribs *req;
    int status = 0;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
        wrb, NULL);

    be_mcc_notify(adapter);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses synchronous mcc */
int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_get_fat *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
    req->fat_operation = cpu_to_le32(QUERY_FAT);
    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
        if (log_size && resp->log_size)
            *log_size = le32_to_cpu(resp->log_size) -
                    sizeof(u32);
    }
err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
{
    struct be_dma_mem get_fat_cmd;
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_get_fat *req;
    u32 offset = 0, total_size, buf_size,
        log_offset = sizeof(u32), payload_len;
    int status;

    if (buf_len == 0)
        return;

    total_size = buf_len;

    get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
    get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
            get_fat_cmd.size,
            &get_fat_cmd.dma);
    if (!get_fat_cmd.va) {
        status = -ENOMEM;
        dev_err(&adapter->pdev->dev,
            "Memory allocation failure while retrieving FAT data\n");
        return;
    }

    spin_lock_bh(&adapter->mcc_lock);

    while (total_size) {
        buf_size = min(total_size, (u32)60*1024);
        total_size -= buf_size;

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
            status = -EBUSY;
            goto err;
        }
        req = get_fat_cmd.va;

        payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
                &get_fat_cmd);

        req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
        req->read_log_offset = cpu_to_le32(log_offset);
        req->read_log_length = cpu_to_le32(buf_size);
        req->data_buffer_size = cpu_to_le32(buf_size);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
            struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
            memcpy(buf + offset,
                resp->data_buffer,
                le32_to_cpu(resp->read_log_length));
        } else {
            dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
            goto err;
        }
        offset += buf_size;
        log_offset += buf_size;
    }
err:
    pci_free_consistent(adapter->pdev, get_fat_cmd.size,
            get_fat_cmd.va,
            get_fat_cmd.dma);
    spin_unlock_bh(&adapter->mcc_lock);
}
/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
            char *fw_on_flash)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_get_fw_version *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }

    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
        strcpy(fw_ver, resp->firmware_version_string);
        if (fw_on_flash)
            strcpy(fw_on_flash, resp->fw_on_flash_version_string);
    }
err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Set the EQ delay interval of an EQ to the specified value
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_modify_eq_delay *req;
    int status = 0;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);

    req->num_eq = cpu_to_le32(1);
    req->delay[0].eq_id = cpu_to_le32(eq_id);
    req->delay[0].phase = 0;
    req->delay[0].delay_multiplier = cpu_to_le32(eqd);

    be_mcc_notify(adapter);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
            u32 num, bool untagged, bool promiscuous)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_vlan_config *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);

    req->interface_id = if_id;
    req->promiscuous = promiscuous;
    req->untagged = untagged;
    req->num_vlan = num;
    if (!promiscuous) {
        memcpy(req->normal_vlan, vtag_array,
            req->num_vlan * sizeof(vtag_array[0]));
    }

    status = be_mcc_notify_wait(adapter);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
    struct be_mcc_wrb *wrb;
    struct be_dma_mem *mem = &adapter->rx_filter;
    struct be_cmd_req_rx_filter *req = mem->va;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    memset(req, 0, sizeof(*req));
    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
                wrb, mem);

    req->if_id = cpu_to_le32(adapter->if_handle);
    if (flags & IFF_PROMISC) {
        req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
                    BE_IF_FLAGS_VLAN_PROMISCUOUS);
        if (value == ON)
            req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
                        BE_IF_FLAGS_VLAN_PROMISCUOUS);
    } else if (flags & IFF_ALLMULTI) {
        req->if_flags_mask = req->if_flags =
                cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
    } else {
        struct netdev_hw_addr *ha;
        int i = 0;

        req->if_flags_mask = req->if_flags =
                cpu_to_le32(BE_IF_FLAGS_MULTICAST);

        /* Reset mcast promisc mode if already set by setting mask
         * and not setting flags field
         */
        if (!lancer_chip(adapter) || be_physfn(adapter))
            req->if_flags_mask |=
                cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);

        req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
        netdev_for_each_mc_addr(ha, adapter->netdev)
            memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
    }

    status = be_mcc_notify_wait(adapter);
err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_set_flow_control *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);

    req->tx_flow_control = cpu_to_le16((u16)tx_fc);
    req->rx_flow_control = cpu_to_le16((u16)rx_fc);

    status = be_mcc_notify_wait(adapter);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses synchronous mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_get_flow_control *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_get_flow_control *resp =
                        embedded_payload(wrb);
        *tx_fc = le16_to_cpu(resp->tx_flow_control);
        *rx_fc = le16_to_cpu(resp->rx_flow_control);
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
        u32 *mode, u32 *caps)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_query_fw_cfg *req;
    int status;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);

    status = be_mbox_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
        *port_num = le32_to_cpu(resp->phys_port);
        *mode = le32_to_cpu(resp->function_mode);
        *caps = le32_to_cpu(resp->function_caps);
    }

    mutex_unlock(&adapter->mbox_lock);
    return status;
}
/* Uses mbox */
int be_cmd_reset_function(struct be_adapter *adapter)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_hdr *req;
    int status;

    if (lancer_chip(adapter)) {
        status = lancer_wait_ready(adapter);
        if (!status) {
            iowrite32(SLI_PORT_CONTROL_IP_MASK,
                  adapter->db + SLIPORT_CONTROL_OFFSET);
            status = lancer_test_and_set_rdy_state(adapter);
        }
        if (status) {
            dev_err(&adapter->pdev->dev,
                "Adapter in non recoverable error\n");
        }
        return status;
    }

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);

    status = be_mbox_notify_wait(adapter);

    mutex_unlock(&adapter->mbox_lock);
    return status;
}
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_rss_config *req;
    u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
            0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
            0x3ea83c02, 0x4a110304};
    int status;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
        OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);

    req->if_id = cpu_to_le32(adapter->if_handle);
    req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
                      RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);

    if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
        req->hdr.version = 1;
        req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
                           RSS_ENABLE_UDP_IPV6);
    }

    req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
    memcpy(req->cpu_table, rsstable, table_size);
    memcpy(req->hash, myhash, sizeof(myhash));
    be_dws_cpu_to_le(req->hash, sizeof(req->hash));

    status = be_mbox_notify_wait(adapter);

    mutex_unlock(&adapter->mbox_lock);
    return status;
}
/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
            u8 bcn, u8 sts, u8 state)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_enable_disable_beacon *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);

    req->port_num = port_num;
    req->beacon_state = state;
    req->beacon_duration = bcn;
    req->status_duration = sts;

    status = be_mcc_notify_wait(adapter);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Uses sync mcc */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_get_beacon_state *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);

    req->port_num = port_num;

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_get_beacon_state *resp =
                        embedded_payload(wrb);
        *state = resp->beacon_state;
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
            u32 data_size, u32 data_offset,
            const char *obj_name, u32 *data_written,
            u8 *change_status, u8 *addn_status)
{
    struct be_mcc_wrb *wrb;
    struct lancer_cmd_req_write_object *req;
    struct lancer_cmd_resp_write_object *resp;
    void *ctxt = NULL;
    int status;

    spin_lock_bh(&adapter->mcc_lock);
    adapter->flash_status = 0;

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err_unlock;
    }

    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_WRITE_OBJECT,
                sizeof(struct lancer_cmd_req_write_object), wrb,
                NULL);

    ctxt = &req->context;
    AMAP_SET_BITS(struct amap_lancer_write_obj_context,
            write_length, ctxt, data_size);

    if (data_size == 0)
        AMAP_SET_BITS(struct amap_lancer_write_obj_context,
                eof, ctxt, 1);
    else
        AMAP_SET_BITS(struct amap_lancer_write_obj_context,
                eof, ctxt, 0);

    be_dws_cpu_to_le(ctxt, sizeof(req->context));
    req->write_offset = cpu_to_le32(data_offset);
    strcpy(req->object_name, obj_name);
    req->descriptor_count = cpu_to_le32(1);
    req->buf_len = cpu_to_le32(data_size);
    req->addr_low = cpu_to_le32((cmd->dma +
                sizeof(struct lancer_cmd_req_write_object))
                & 0xFFFFFFFF);
    req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
                sizeof(struct lancer_cmd_req_write_object)));

    be_mcc_notify(adapter);
    spin_unlock_bh(&adapter->mcc_lock);

    if (!wait_for_completion_timeout(&adapter->flash_compl,
                     msecs_to_jiffies(30000)))
        status = -1;
    else
        status = adapter->flash_status;

    resp = embedded_payload(wrb);
    if (!status) {
        *data_written = le32_to_cpu(resp->actual_write_len);
        *change_status = resp->change_status;
    } else {
        *addn_status = resp->additional_status;
    }

    return status;

err_unlock:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
        u32 data_size, u32 data_offset, const char *obj_name,
        u32 *data_read, u32 *eof, u8 *addn_status)
{
    struct be_mcc_wrb *wrb;
    struct lancer_cmd_req_read_object *req;
    struct lancer_cmd_resp_read_object *resp;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err_unlock;
    }

    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
            OPCODE_COMMON_READ_OBJECT,
            sizeof(struct lancer_cmd_req_read_object), wrb,
            NULL);

    req->desired_read_len = cpu_to_le32(data_size);
    req->read_offset = cpu_to_le32(data_offset);
    strcpy(req->object_name, obj_name);
    req->descriptor_count = cpu_to_le32(1);
    req->buf_len = cpu_to_le32(data_size);
    req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
    req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));

    status = be_mcc_notify_wait(adapter);

    resp = embedded_payload(wrb);
    if (!status) {
        *data_read = le32_to_cpu(resp->actual_read_len);
        *eof = le32_to_cpu(resp->eof);
    } else {
        *addn_status = resp->additional_status;
    }

err_unlock:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
            u32 flash_type, u32 flash_opcode, u32 buf_size)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_write_flashrom *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);
    adapter->flash_status = 0;

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err_unlock;
    }
    req = cmd->va;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);

    req->params.op_type = cpu_to_le32(flash_type);
    req->params.op_code = cpu_to_le32(flash_opcode);
    req->params.data_buf_size = cpu_to_le32(buf_size);

    be_mcc_notify(adapter);
    spin_unlock_bh(&adapter->mcc_lock);

    if (!wait_for_completion_timeout(&adapter->flash_compl,
                     msecs_to_jiffies(40000)))
        status = -1;
    else
        status = adapter->flash_status;

    return status;

err_unlock:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
             int offset)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_write_flashrom *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL);

    req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
    req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
    req->params.offset = cpu_to_le32(offset);
    req->params.data_buf_size = cpu_to_le32(0x4);

    status = be_mcc_notify_wait(adapter);
    if (!status)
        memcpy(flashed_crc, req->params.data_buf, 4);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
                struct be_dma_mem *nonemb_cmd)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_acpi_wol_magic_config *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = nonemb_cmd->va;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
        OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
        nonemb_cmd);
    memcpy(req->magic_mac, mac, ETH_ALEN);

    status = be_mcc_notify_wait(adapter);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
            u8 loopback_type, u8 enable)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_set_lmode *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }

    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
            OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
            NULL);

    req->src_port = port_num;
    req->dest_port = port_num;
    req->loopback_type = loopback_type;
    req->loopback_state = enable;

    status = be_mcc_notify_wait(adapter);
err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
        u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_loopback_test *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }

    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
            OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
    req->hdr.timeout = cpu_to_le32(4);

    req->pattern = cpu_to_le64(pattern);
    req->src_port = cpu_to_le32(port_num);
    req->dest_port = cpu_to_le32(port_num);
    req->pkt_size = cpu_to_le32(pkt_size);
    req->num_pkts = cpu_to_le32(num_pkts);
    req->loopback_type = cpu_to_le32(loopback_type);

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
        status = le32_to_cpu(resp->status);
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
                u32 byte_cnt, struct be_dma_mem *cmd)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_ddrdma_test *req;
    int status;
    int i, j = 0;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = cmd->va;
    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
            OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);

    req->pattern = cpu_to_le64(pattern);
    req->byte_count = cpu_to_le32(byte_cnt);
    /* Replicate the 64-bit pattern byte-by-byte across the send buffer */
    for (i = 0; i < byte_cnt; i++) {
        req->snd_buff[i] = (u8)(pattern >> (j*8));
        j++;
        if (j > 7)
            j = 0;
    }

    status = be_mcc_notify_wait(adapter);

    if (!status) {
        struct be_cmd_resp_ddrdma_test *resp;
        resp = cmd->va;
        if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
                resp->snd_err) {
            status = -1;
        }
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
                struct be_dma_mem *nonemb_cmd)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_seeprom_read *req;
    struct be_sge *sge;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = nonemb_cmd->va;
    sge = nonembedded_sgl(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
            OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
            nonemb_cmd);

    status = be_mcc_notify_wait(adapter);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_get_phy_info(struct be_adapter *adapter)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_get_phy_info *req;
    struct be_dma_mem cmd;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    cmd.size = sizeof(struct be_cmd_req_get_phy_info);
    cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
                    &cmd.dma);
    if (!cmd.va) {
        dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
        status = -ENOMEM;
        goto err;
    }

    req = cmd.va;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
            OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
            wrb, &cmd);

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_phy_info *resp_phy_info =
                cmd.va + sizeof(struct be_cmd_req_hdr);
        adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
        adapter->phy.interface_type =
            le16_to_cpu(resp_phy_info->interface_type);
        adapter->phy.auto_speeds_supported =
            le16_to_cpu(resp_phy_info->auto_speeds_supported);
        adapter->phy.fixed_speeds_supported =
            le16_to_cpu(resp_phy_info->fixed_speeds_supported);
        adapter->phy.misc_params =
            le32_to_cpu(resp_phy_info->misc_params);
    }
    pci_free_consistent(adapter->pdev, cmd.size,
                cmd.va, cmd.dma);
err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_set_qos *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }

    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
            OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);

    req->hdr.domain = domain;
    req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
    req->max_bps_nic = cpu_to_le32(bps);

    status = be_mcc_notify_wait(adapter);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_cntl_attribs *req;
    struct be_cmd_resp_cntl_attribs *resp;
    int status;
    int payload_len = max(sizeof(*req), sizeof(*resp));
    struct mgmt_controller_attrib *attribs;
    struct be_dma_mem attribs_cmd;

    memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
    attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
    attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
                        &attribs_cmd.dma);
    if (!attribs_cmd.va) {
        dev_err(&adapter->pdev->dev,
                "Memory allocation failure\n");
        return -ENOMEM;
    }

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = attribs_cmd.va;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
            OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
            &attribs_cmd);

    status = be_mbox_notify_wait(adapter);
    if (!status) {
        attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
        adapter->hba_port_num = attribs->hba_attribs.phy_port;
    }

err:
    mutex_unlock(&adapter->mbox_lock);
    pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
                    attribs_cmd.dma);
    return status;
}
/* Uses mbox */
int be_cmd_req_native_mode(struct be_adapter *adapter)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_set_func_cap *req;
    int status;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }

    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);

    req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
                CAPABILITY_BE3_NATIVE_ERX_API);
    req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);

    status = be_mbox_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
        adapter->be3_native = le32_to_cpu(resp->cap_flags) &
                    CAPABILITY_BE3_NATIVE_ERX_API;
    }
err:
    mutex_unlock(&adapter->mbox_lock);
    return status;
}
/* Uses synchronous MCCQ */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
            bool *pmac_id_active, u32 *pmac_id, u8 domain)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_get_mac_list *req;
    int status;
    int mac_count;
    struct be_dma_mem get_mac_list_cmd;
    int i;

    memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
    get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
    get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
            get_mac_list_cmd.size,
            &get_mac_list_cmd.dma);

    if (!get_mac_list_cmd.va) {
        dev_err(&adapter->pdev->dev,
                "Memory allocation failure during GET_MAC_LIST\n");
        return -ENOMEM;
    }

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto out;
    }

    req = get_mac_list_cmd.va;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
                wrb, &get_mac_list_cmd);

    req->hdr.domain = domain;
    req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
    req->perm_override = 1;

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_get_mac_list *resp =
                        get_mac_list_cmd.va;
        mac_count = resp->true_mac_count + resp->pseudo_mac_count;
        /* Mac list returned could contain one or more active mac_ids
         * or one or more true or pseudo permanent mac addresses.
         * If an active mac_id is present, return first active mac_id
         * found.
         */
        for (i = 0; i < mac_count; i++) {
            struct get_list_macaddr *mac_entry;
            u16 mac_addr_size;
            u32 mac_id;

            mac_entry = &resp->macaddr_list[i];
            mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
            /* mac_id is a 32 bit value and mac_addr size
             * is 6 bytes
             */
            if (mac_addr_size == sizeof(u32)) {
                *pmac_id_active = true;
                mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
                *pmac_id = le32_to_cpu(mac_id);
                goto out;
            }
        }
        /* If no active mac_id found, return first mac addr */
        *pmac_id_active = false;
        memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
                                ETH_ALEN);
    }

out:
    spin_unlock_bh(&adapter->mcc_lock);
    pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
            get_mac_list_cmd.va, get_mac_list_cmd.dma);
    return status;
}
/* Uses synchronous MCCQ */
int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
            u8 mac_count, u32 domain)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_set_mac_list *req;
    int status;
    struct be_dma_mem cmd;

    memset(&cmd, 0, sizeof(struct be_dma_mem));
    cmd.size = sizeof(struct be_cmd_req_set_mac_list);
    cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
            &cmd.dma, GFP_KERNEL);
    if (!cmd.va) {
        dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
        return -ENOMEM;
    }

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }

    req = cmd.va;
    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
                wrb, &cmd);

    req->hdr.domain = domain;
    req->mac_count = mac_count;
    if (mac_count)
        memcpy(req->mac, mac_array, ETH_ALEN*mac_count);

    status = be_mcc_notify_wait(adapter);

err:
    dma_free_coherent(&adapter->pdev->dev, cmd.size,
                cmd.va, cmd.dma);
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Set Hyper switch config */
int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
            u32 domain, u16 intf_id)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_set_hsw_config *req;
    void *ctxt;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }

    req = embedded_payload(wrb);
    ctxt = &req->context;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);

    req->hdr.domain = domain;
    AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
    if (pvid) {
        AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
    }

    be_dws_cpu_to_le(req->context, sizeof(req->context));
    status = be_mcc_notify_wait(adapter);

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
/* Get Hyper switch config */
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
            u32 domain, u16 intf_id)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_get_hsw_config *req;
    void *ctxt;
    int status;
    u16 vid;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }

    req = embedded_payload(wrb);
    ctxt = &req->context;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
        OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);

    req->hdr.domain = domain;
    AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
                                intf_id);
    AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
    be_dws_cpu_to_le(req->context, sizeof(req->context));

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_get_hsw_config *resp =
                        embedded_payload(wrb);
        be_dws_le_to_cpu(&resp->context,
                        sizeof(resp->context));
        vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
                            pvid, &resp->context);
        *pvid = le16_to_cpu(vid);
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_acpi_wol_magic_config_v1 *req;
    int status;
    int payload_len = sizeof(*req);
    struct be_dma_mem cmd;

    memset(&cmd, 0, sizeof(struct be_dma_mem));
    cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
    cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
                    &cmd.dma);
    if (!cmd.va) {
        dev_err(&adapter->pdev->dev,
                "Memory allocation failure\n");
        return -ENOMEM;
    }

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }

    req = cmd.va;

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
                payload_len, wrb, &cmd);

    req->hdr.version = 1;
    req->query_options = BE_GET_WOL_CAP;

    status = be_mbox_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
        resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;

        /* the command could succeed misleadingly on old f/w
         * which is not aware of the V1 version. fake an error. */
        if (resp->hdr.response_length < payload_len) {
            status = -1;
            goto err;
        }
        adapter->wol_cap = resp->wol_settings;
    }
err:
    mutex_unlock(&adapter->mbox_lock);
    pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
    return status;
}
int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
                   struct be_dma_mem *cmd)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_get_ext_fat_caps *req;
    int status;

    if (mutex_lock_interruptible(&adapter->mbox_lock))
        return -1;

    wrb = wrb_from_mbox(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }

    req = cmd->va;
    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
                cmd->size, wrb, cmd);
    req->parameter_type = cpu_to_le32(1);

    status = be_mbox_notify_wait(adapter);
err:
    mutex_unlock(&adapter->mbox_lock);
    return status;
}
int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
                   struct be_dma_mem *cmd,
                   struct be_fat_conf_params *configs)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_set_ext_fat_caps *req;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }

    req = cmd->va;
    memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
                cmd->size, wrb, cmd);

    status = be_mcc_notify_wait(adapter);
err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
{
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_get_port_name *req;
    int status;

    if (!lancer_chip(adapter)) {
        *port_name = adapter->hba_port_num + '0';
        return 0;
    }

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }

    req = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
                NULL);
    req->hdr.version = 1;

    status = be_mcc_notify_wait(adapter);
    if (!status) {
        struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
        *port_name = resp->port_name[adapter->hba_port_num];
    } else {
        *port_name = adapter->hba_port_num + '0';
    }

err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
            int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
    struct be_adapter *adapter = netdev_priv(netdev_handle);
    struct be_mcc_wrb *wrb;
    struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
    struct be_cmd_req_hdr *req;
    struct be_cmd_resp_hdr *resp;
    int status;

    spin_lock_bh(&adapter->mcc_lock);

    wrb = wrb_from_mccq(adapter);
    if (!wrb) {
        status = -EBUSY;
        goto err;
    }
    req = embedded_payload(wrb);
    resp = embedded_payload(wrb);

    be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
                hdr->opcode, wrb_payload_size, wrb, NULL);
    memcpy(req, wrb_payload, wrb_payload_size);
    be_dws_cpu_to_le(req, wrb_payload_size);

    status = be_mcc_notify_wait(adapter);
    if (cmd_status)
        *cmd_status = (status & 0xffff);
    if (ext_status)
        *ext_status = 0;
    memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
    be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
    spin_unlock_bh(&adapter->mcc_lock);
    return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);