2 * Copyright (C) 2005 - 2011 Emulex
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
15 * Costa Mesa, CA 92626
18 #include <linux/module.h>
22 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
24 return wrb->payload.embedded_payload;
27 static void be_mcc_notify(struct be_adapter *adapter)
29 struct be_queue_info *mccq = &adapter->mcc_obj.q;
32 if (be_error(adapter))
35 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
36 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
39 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
42 /* To check if valid bit is set, check the entire word as we don't know
43 * the endianness of the data (old entry is host endian while a new entry is
 * little endian). */
45 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
47 if (compl->flags != 0) {
48 compl->flags = le32_to_cpu(compl->flags);
49 BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
56 /* Need to reset the entire word that houses the valid bit */
57 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
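/* The two WRB tags carry the virtual address of the originating request
 * header (stashed there by be_wrb_cmd_hdr_prepare); rebuild the 64-bit
 * pointer from the two 32-bit halves so the response can be located.
 */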
62 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
67 addr = ((addr << 16) << 16) | tag0;
71 static int be_mcc_compl_process(struct be_adapter *adapter,
72 struct be_mcc_compl *compl)
74 u16 compl_status, extd_status;
75 struct be_cmd_resp_hdr *resp_hdr;
76 u8 opcode = 0, subsystem = 0;
78 /* Just swap the status to host endian; mcc tag is opaquely copied
 * from the mcc_wrb */
80 be_dws_le_to_cpu(compl, 4);
82 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
83 CQE_STATUS_COMPL_MASK;
85 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
88 opcode = resp_hdr->opcode;
89 subsystem = resp_hdr->subsystem;
92 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
93 (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
94 (subsystem == CMD_SUBSYSTEM_COMMON)) {
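/* Flash commands are waited on via adapter->flash_compl (see
 * be_cmd_write_flashrom / lancer_cmd_write_object), so record the status
 * and signal that completion here instead of the normal wait path.
 */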
95 adapter->flash_status = compl_status;
96 complete(&adapter->flash_compl);
99 if (compl_status == MCC_STATUS_SUCCESS) {
100 if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
101 (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
102 (subsystem == CMD_SUBSYSTEM_ETH)) {
103 be_parse_stats(adapter);
104 adapter->stats_cmd_sent = false;
106 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
107 subsystem == CMD_SUBSYSTEM_COMMON) {
108 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
110 adapter->drv_stats.be_on_die_temperature =
111 resp->on_die_temperature;
114 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
115 adapter->be_get_temp_freq = 0;
117 if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
118 compl_status == MCC_STATUS_ILLEGAL_REQUEST)
121 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
122 dev_warn(&adapter->pdev->dev,
123 "opcode %d-%d is not permitted\n",
126 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
127 CQE_STATUS_EXTD_MASK;
128 dev_err(&adapter->pdev->dev,
129 "opcode %d-%d failed:status %d-%d\n",
130 opcode, subsystem, compl_status, extd_status);
137 /* Link state evt is a string of bytes; no need for endian swapping */
138 static void be_async_link_state_process(struct be_adapter *adapter,
139 struct be_async_event_link_state *evt)
141 /* When link status changes, link speed must be re-queried from FW */
142 adapter->phy.link_speed = -1;
144 /* Ignore physical link event */
145 if (lancer_chip(adapter) &&
146 !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
149 /* For the initial link status do not rely on the ASYNC event as
150 * it may not be received in some cases.
152 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
153 be_link_status_update(adapter, evt->port_link_status);
156 /* Grp5 CoS Priority evt */
157 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
158 struct be_async_event_grp5_cos_priority *evt)
161 adapter->vlan_prio_bmap = evt->available_priority_bmap;
162 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
163 adapter->recommended_prio =
164 evt->reco_default_priority << VLAN_PRIO_SHIFT;
168 /* Grp5 QOS Speed evt */
169 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
170 struct be_async_event_grp5_qos_link_speed *evt)
172 if (evt->physical_port == adapter->port_num) {
173 /* qos_link_speed is in units of 10 Mbps */
174 adapter->phy.link_speed = evt->qos_link_speed * 10;
179 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
180 struct be_async_event_grp5_pvid_state *evt)
183 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
188 static void be_async_grp5_evt_process(struct be_adapter *adapter,
189 u32 trailer, struct be_mcc_compl *evt)
193 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
194 ASYNC_TRAILER_EVENT_TYPE_MASK;
196 switch (event_type) {
197 case ASYNC_EVENT_COS_PRIORITY:
198 be_async_grp5_cos_priority_process(adapter,
199 (struct be_async_event_grp5_cos_priority *)evt);
201 case ASYNC_EVENT_QOS_SPEED:
202 be_async_grp5_qos_speed_process(adapter,
203 (struct be_async_event_grp5_qos_link_speed *)evt);
205 case ASYNC_EVENT_PVID_STATE:
206 be_async_grp5_pvid_state_process(adapter,
207 (struct be_async_event_grp5_pvid_state *)evt);
210 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
215 static inline bool is_link_state_evt(u32 trailer)
217 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
218 ASYNC_TRAILER_EVENT_CODE_MASK) ==
219 ASYNC_EVENT_CODE_LINK_STATE;
222 static inline bool is_grp5_evt(u32 trailer)
224 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
225 ASYNC_TRAILER_EVENT_CODE_MASK) ==
226 ASYNC_EVENT_CODE_GRP_5);
229 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
231 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
232 struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
234 if (be_mcc_compl_is_new(compl)) {
235 queue_tail_inc(mcc_cq);
241 void be_async_mcc_enable(struct be_adapter *adapter)
243 spin_lock_bh(&adapter->mcc_cq_lock);
245 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
246 adapter->mcc_obj.rearm_cq = true;
248 spin_unlock_bh(&adapter->mcc_cq_lock);
251 void be_async_mcc_disable(struct be_adapter *adapter)
253 adapter->mcc_obj.rearm_cq = false;
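/* Drain the MCC completion queue: async entries are dispatched to the
 * event handlers above, command completions are processed and accounted
 * against the MCC queue, and the CQ is re-armed if rearm_cq is set.
 */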
256 int be_process_mcc(struct be_adapter *adapter)
258 struct be_mcc_compl *compl;
259 int num = 0, status = 0;
260 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
262 spin_lock_bh(&adapter->mcc_cq_lock);
263 while ((compl = be_mcc_compl_get(adapter))) {
264 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
265 /* Interpret flags as an async trailer */
266 if (is_link_state_evt(compl->flags))
267 be_async_link_state_process(adapter,
268 (struct be_async_event_link_state *) compl);
269 else if (is_grp5_evt(compl->flags))
270 be_async_grp5_evt_process(adapter,
271 compl->flags, compl);
272 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
273 status = be_mcc_compl_process(adapter, compl);
274 atomic_dec(&mcc_obj->q.used);
276 be_mcc_compl_use(compl);
281 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
283 spin_unlock_bh(&adapter->mcc_cq_lock);
287 /* Wait till no more pending mcc requests are present */
288 static int be_mcc_wait_compl(struct be_adapter *adapter)
290 #define mcc_timeout 120000 /* 12s timeout */
292 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
294 for (i = 0; i < mcc_timeout; i++) {
295 if (be_error(adapter))
298 status = be_process_mcc(adapter);
300 if (atomic_read(&mcc_obj->q.used) == 0)
304 if (i == mcc_timeout) {
305 dev_err(&adapter->pdev->dev, "FW not responding\n");
306 adapter->fw_timeout = true;
312 /* Notify MCC requests and wait for completion */
313 static int be_mcc_notify_wait(struct be_adapter *adapter)
316 struct be_mcc_wrb *wrb;
317 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
318 u16 index = mcc_obj->q.head;
319 struct be_cmd_resp_hdr *resp;
321 index_dec(&index, mcc_obj->q.len);
322 wrb = queue_index_node(&mcc_obj->q, index);
324 resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
326 be_mcc_notify(adapter);
328 status = be_mcc_wait_compl(adapter);
332 status = resp->status;
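/* Poll the mailbox doorbell until the firmware sets the ready bit.
 * An all-ones read usually means the device is no longer responding on
 * the bus; a timeout marks the adapter with fw_timeout.
 */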
337 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
343 if (be_error(adapter))
346 ready = ioread32(db);
347 if (ready == 0xffffffff)
350 ready &= MPU_MAILBOX_DB_RDY_MASK;
355 dev_err(&adapter->pdev->dev, "FW not responding\n");
356 adapter->fw_timeout = true;
357 be_detect_error(adapter);
369 * Inserts the mailbox address into the doorbell in two steps and then
370 * polls the mbox doorbell till a command completion (or a timeout) occurs
372 static int be_mbox_notify_wait(struct be_adapter *adapter)
376 void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
377 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
378 struct be_mcc_mailbox *mbox = mbox_mem->va;
379 struct be_mcc_compl *compl = &mbox->compl;
381 /* wait for ready to be set */
382 status = be_mbox_db_ready_wait(adapter, db);
386 val |= MPU_MAILBOX_DB_HI_MASK;
387 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
388 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
391 /* wait for ready to be set */
392 status = be_mbox_db_ready_wait(adapter, db);
397 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
398 val |= (u32)(mbox_mem->dma >> 4) << 2;
401 status = be_mbox_db_ready_wait(adapter, db);
405 /* A cq entry has been made now */
406 if (be_mcc_compl_is_new(compl)) {
407 status = be_mcc_compl_process(adapter, &mbox->compl);
408 be_mcc_compl_use(compl);
412 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
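/* Read the POST stage from the EP semaphore register (reached via the db
 * BAR on Lancer, via the CSR BAR otherwise) and report an error when the
 * POST error bit is set.
 */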
418 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
422 if (lancer_chip(adapter))
423 sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
425 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
427 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
428 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
434 int lancer_wait_ready(struct be_adapter *adapter)
436 #define SLIPORT_READY_TIMEOUT 30
440 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
441 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
442 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
448 if (i == SLIPORT_READY_TIMEOUT)
454 int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
457 u32 sliport_status, err, reset_needed;
458 status = lancer_wait_ready(adapter);
460 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
461 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
462 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
463 if (err && reset_needed) {
464 iowrite32(SLI_PORT_CONTROL_IP_MASK,
465 adapter->db + SLIPORT_CONTROL_OFFSET);
467 /* check if adapter has corrected the error */
468 status = lancer_wait_ready(adapter);
469 sliport_status = ioread32(adapter->db +
470 SLIPORT_STATUS_OFFSET);
471 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
472 SLIPORT_STATUS_RN_MASK);
473 if (status || sliport_status)
475 } else if (err || reset_needed) {
482 int be_fw_wait_ready(struct be_adapter *adapter)
485 int status, timeout = 0;
486 struct device *dev = &adapter->pdev->dev;
488 if (lancer_chip(adapter)) {
489 status = lancer_wait_ready(adapter);
494 status = be_POST_stage_get(adapter, &stage);
496 dev_err(dev, "POST error; stage=0x%x\n", stage);
498 } else if (stage != POST_STAGE_ARMFW_RDY) {
499 if (msleep_interruptible(2000)) {
500 dev_err(dev, "Waiting for POST aborted\n");
507 } while (timeout < 60);
509 dev_err(dev, "POST timeout; stage=0x%x\n", stage);
514 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
516 return &wrb->payload.sgl[0];
520 /* Don't touch the hdr after it's prepared */
521 /* mem will be NULL for embedded commands */
522 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
523 u8 subsystem, u8 opcode, int cmd_len,
524 struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
527 unsigned long addr = (unsigned long)req_hdr;
530 req_hdr->opcode = opcode;
531 req_hdr->subsystem = subsystem;
532 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
533 req_hdr->version = 0;
535 wrb->tag0 = req_addr & 0xFFFFFFFF;
536 wrb->tag1 = upper_32_bits(req_addr);
538 wrb->payload_length = cmd_len;
540 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
541 MCC_WRB_SGE_CNT_SHIFT;
542 sge = nonembedded_sgl(wrb);
543 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
544 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
545 sge->len = cpu_to_le32(mem->size);
547 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
548 be_dws_cpu_to_le(wrb, 8);
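/* Split the DMA buffer into 4K pages and fill the firmware's page-address
 * array (lo/hi 32-bit halves per page), up to max_pages entries.
 */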
551 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
552 struct be_dma_mem *mem)
554 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
555 u64 dma = (u64)mem->dma;
557 for (i = 0; i < buf_pages; i++) {
558 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
559 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
564 /* Converts interrupt delay in microseconds to multiplier value */
565 static u32 eq_delay_to_mult(u32 usec_delay)
567 #define MAX_INTR_RATE 651042
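/* Worked example of the conversion below: usec_delay = 96 gives
 * interrupt_rate = 1000000 / 96 = 10416, so
 * multiplier = ((651042 - 10416) * 10 / 10416 + 5) / 10 = 62.
 * The result is clamped to 1023, which corresponds to the lowest
 * supported interrupt rate (i.e. the maximum delay).
 */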
568 const u32 round = 10;
574 u32 interrupt_rate = 1000000 / usec_delay;
575 /* Max delay, corresponding to the lowest interrupt rate */
576 if (interrupt_rate == 0)
579 multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
580 multiplier /= interrupt_rate;
581 /* Round the multiplier to the closest value.*/
582 multiplier = (multiplier + round/2) / round;
583 multiplier = min(multiplier, (u32)1023);
589 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
591 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
592 struct be_mcc_wrb *wrb
593 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
594 memset(wrb, 0, sizeof(*wrb));
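/* Reserve the next free WRB slot on the MCC queue; callers must hold
 * adapter->mcc_lock, and the queue's used count bounds the number of
 * outstanding commands.
 */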
598 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
600 struct be_queue_info *mccq = &adapter->mcc_obj.q;
601 struct be_mcc_wrb *wrb;
603 if (atomic_read(&mccq->used) >= mccq->len) {
604 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
608 wrb = queue_head_node(mccq);
609 queue_head_inc(mccq);
610 atomic_inc(&mccq->used);
611 memset(wrb, 0, sizeof(*wrb));
615 /* Tell fw we're about to start firing cmds by writing a
616 * special pattern across the wrb hdr; uses mbox
618 int be_cmd_fw_init(struct be_adapter *adapter)
623 if (lancer_chip(adapter))
626 if (mutex_lock_interruptible(&adapter->mbox_lock))
629 wrb = (u8 *)wrb_from_mbox(adapter);
639 status = be_mbox_notify_wait(adapter);
641 mutex_unlock(&adapter->mbox_lock);
645 /* Tell fw we're done with firing cmds by writing a
646 * special pattern across the wrb hdr; uses mbox
648 int be_cmd_fw_clean(struct be_adapter *adapter)
653 if (lancer_chip(adapter))
656 if (mutex_lock_interruptible(&adapter->mbox_lock))
659 wrb = (u8 *)wrb_from_mbox(adapter);
669 status = be_mbox_notify_wait(adapter);
671 mutex_unlock(&adapter->mbox_lock);
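/* Create an event queue: the EQ context (valid bit, entry count encoded as
 * log2(len/256), delay multiplier) is built with the AMAP accessors and the
 * queue pages are handed to the firmware. Uses the mbox.
 */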
675 int be_cmd_eq_create(struct be_adapter *adapter,
676 struct be_queue_info *eq, int eq_delay)
678 struct be_mcc_wrb *wrb;
679 struct be_cmd_req_eq_create *req;
680 struct be_dma_mem *q_mem = &eq->dma_mem;
683 if (mutex_lock_interruptible(&adapter->mbox_lock))
686 wrb = wrb_from_mbox(adapter);
687 req = embedded_payload(wrb);
689 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
690 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
692 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
694 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
696 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
697 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
698 __ilog2_u32(eq->len/256));
699 AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
700 eq_delay_to_mult(eq_delay));
701 be_dws_cpu_to_le(req->context, sizeof(req->context));
703 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
705 status = be_mbox_notify_wait(adapter);
707 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
708 eq->id = le16_to_cpu(resp->eq_id);
712 mutex_unlock(&adapter->mbox_lock);
717 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
718 u8 type, bool permanent, u32 if_handle, u32 pmac_id)
720 struct be_mcc_wrb *wrb;
721 struct be_cmd_req_mac_query *req;
724 spin_lock_bh(&adapter->mcc_lock);
726 wrb = wrb_from_mccq(adapter);
731 req = embedded_payload(wrb);
733 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
734 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
739 req->if_id = cpu_to_le16((u16) if_handle);
740 req->pmac_id = cpu_to_le32(pmac_id);
744 status = be_mcc_notify_wait(adapter);
746 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
747 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
751 spin_unlock_bh(&adapter->mcc_lock);
755 /* Uses synchronous MCCQ */
756 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
757 u32 if_id, u32 *pmac_id, u32 domain)
759 struct be_mcc_wrb *wrb;
760 struct be_cmd_req_pmac_add *req;
763 spin_lock_bh(&adapter->mcc_lock);
765 wrb = wrb_from_mccq(adapter);
770 req = embedded_payload(wrb);
772 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
773 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
775 req->hdr.domain = domain;
776 req->if_id = cpu_to_le32(if_id);
777 memcpy(req->mac_address, mac_addr, ETH_ALEN);
779 status = be_mcc_notify_wait(adapter);
781 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
782 *pmac_id = le32_to_cpu(resp->pmac_id);
786 spin_unlock_bh(&adapter->mcc_lock);
788 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
794 /* Uses synchronous MCCQ */
795 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
797 struct be_mcc_wrb *wrb;
798 struct be_cmd_req_pmac_del *req;
804 spin_lock_bh(&adapter->mcc_lock);
806 wrb = wrb_from_mccq(adapter);
811 req = embedded_payload(wrb);
813 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
814 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
816 req->hdr.domain = dom;
817 req->if_id = cpu_to_le32(if_id);
818 req->pmac_id = cpu_to_le32(pmac_id);
820 status = be_mcc_notify_wait(adapter);
823 spin_unlock_bh(&adapter->mcc_lock);
828 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
829 struct be_queue_info *eq, bool no_delay, int coalesce_wm)
831 struct be_mcc_wrb *wrb;
832 struct be_cmd_req_cq_create *req;
833 struct be_dma_mem *q_mem = &cq->dma_mem;
837 if (mutex_lock_interruptible(&adapter->mbox_lock))
840 wrb = wrb_from_mbox(adapter);
841 req = embedded_payload(wrb);
842 ctxt = &req->context;
844 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
845 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
847 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
848 if (lancer_chip(adapter)) {
849 req->hdr.version = 2;
850 req->page_size = 1; /* 1 for 4K */
851 AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
853 AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
854 __ilog2_u32(cq->len/256));
855 AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
856 AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
858 AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
861 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
863 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
865 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
866 __ilog2_u32(cq->len/256));
867 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
868 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
869 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
872 be_dws_cpu_to_le(ctxt, sizeof(req->context));
874 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
876 status = be_mbox_notify_wait(adapter);
878 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
879 cq->id = le16_to_cpu(resp->cq_id);
883 mutex_unlock(&adapter->mbox_lock);
888 static u32 be_encoded_q_len(int q_len)
890 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
891 if (len_encoded == 16)
896 int be_cmd_mccq_ext_create(struct be_adapter *adapter,
897 struct be_queue_info *mccq,
898 struct be_queue_info *cq)
900 struct be_mcc_wrb *wrb;
901 struct be_cmd_req_mcc_ext_create *req;
902 struct be_dma_mem *q_mem = &mccq->dma_mem;
906 if (mutex_lock_interruptible(&adapter->mbox_lock))
909 wrb = wrb_from_mbox(adapter);
910 req = embedded_payload(wrb);
911 ctxt = &req->context;
913 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
914 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
916 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
917 if (lancer_chip(adapter)) {
918 req->hdr.version = 1;
919 req->cq_id = cpu_to_le16(cq->id);
921 AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
922 be_encoded_q_len(mccq->len));
923 AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
924 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
926 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
930 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
931 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
932 be_encoded_q_len(mccq->len));
933 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
936 /* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
937 req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
938 be_dws_cpu_to_le(ctxt, sizeof(req->context));
940 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
942 status = be_mbox_notify_wait(adapter);
944 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
945 mccq->id = le16_to_cpu(resp->id);
946 mccq->created = true;
948 mutex_unlock(&adapter->mbox_lock);
953 int be_cmd_mccq_org_create(struct be_adapter *adapter,
954 struct be_queue_info *mccq,
955 struct be_queue_info *cq)
957 struct be_mcc_wrb *wrb;
958 struct be_cmd_req_mcc_create *req;
959 struct be_dma_mem *q_mem = &mccq->dma_mem;
963 if (mutex_lock_interruptible(&adapter->mbox_lock))
966 wrb = wrb_from_mbox(adapter);
967 req = embedded_payload(wrb);
968 ctxt = &req->context;
970 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
971 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
973 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
975 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
976 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
977 be_encoded_q_len(mccq->len));
978 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
980 be_dws_cpu_to_le(ctxt, sizeof(req->context));
982 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
984 status = be_mbox_notify_wait(adapter);
986 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
987 mccq->id = le16_to_cpu(resp->id);
988 mccq->created = true;
991 mutex_unlock(&adapter->mbox_lock);
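/* Prefer the extended MCCQ create, which lets the driver subscribe to the
 * async events above; if older BE firmware rejects it, warn and fall back
 * to the original MCC_CREATE command.
 */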
995 int be_cmd_mccq_create(struct be_adapter *adapter,
996 struct be_queue_info *mccq,
997 struct be_queue_info *cq)
1001 status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1002 if (status && !lancer_chip(adapter)) {
1003 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1004 "or newer to avoid conflicting priorities between NIC "
1005 "and FCoE traffic");
1006 status = be_cmd_mccq_org_create(adapter, mccq, cq);
1011 int be_cmd_txq_create(struct be_adapter *adapter,
1012 struct be_queue_info *txq,
1013 struct be_queue_info *cq)
1015 struct be_mcc_wrb *wrb;
1016 struct be_cmd_req_eth_tx_create *req;
1017 struct be_dma_mem *q_mem = &txq->dma_mem;
1021 spin_lock_bh(&adapter->mcc_lock);
1023 wrb = wrb_from_mccq(adapter);
1029 req = embedded_payload(wrb);
1030 ctxt = &req->context;
1032 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1033 OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);
1035 if (lancer_chip(adapter)) {
1036 req->hdr.version = 1;
1037 AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
1038 adapter->if_handle);
1041 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1042 req->ulp_num = BE_ULP1_NUM;
1043 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
1045 AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
1046 be_encoded_q_len(txq->len));
1047 AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
1048 AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
1050 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1052 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1054 status = be_mcc_notify_wait(adapter);
1056 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
1057 txq->id = le16_to_cpu(resp->cid);
1058 txq->created = true;
1062 spin_unlock_bh(&adapter->mcc_lock);
1068 int be_cmd_rxq_create(struct be_adapter *adapter,
1069 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1070 u32 if_id, u32 rss, u8 *rss_id)
1072 struct be_mcc_wrb *wrb;
1073 struct be_cmd_req_eth_rx_create *req;
1074 struct be_dma_mem *q_mem = &rxq->dma_mem;
1077 spin_lock_bh(&adapter->mcc_lock);
1079 wrb = wrb_from_mccq(adapter);
1084 req = embedded_payload(wrb);
1086 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1087 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1089 req->cq_id = cpu_to_le16(cq_id);
1090 req->frag_size = fls(frag_size) - 1;
1092 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1093 req->interface_id = cpu_to_le32(if_id);
1094 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
1095 req->rss_queue = cpu_to_le32(rss);
1097 status = be_mcc_notify_wait(adapter);
1099 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1100 rxq->id = le16_to_cpu(resp->id);
1101 rxq->created = true;
1102 *rss_id = resp->rss_id;
1106 spin_unlock_bh(&adapter->mcc_lock);
1110 /* Generic destroyer function for all types of queues; uses the mbox */
1113 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1116 struct be_mcc_wrb *wrb;
1117 struct be_cmd_req_q_destroy *req;
1118 u8 subsys = 0, opcode = 0;
1121 if (mutex_lock_interruptible(&adapter->mbox_lock))
1124 wrb = wrb_from_mbox(adapter);
1125 req = embedded_payload(wrb);
1127 switch (queue_type) {
1129 subsys = CMD_SUBSYSTEM_COMMON;
1130 opcode = OPCODE_COMMON_EQ_DESTROY;
1133 subsys = CMD_SUBSYSTEM_COMMON;
1134 opcode = OPCODE_COMMON_CQ_DESTROY;
1137 subsys = CMD_SUBSYSTEM_ETH;
1138 opcode = OPCODE_ETH_TX_DESTROY;
1141 subsys = CMD_SUBSYSTEM_ETH;
1142 opcode = OPCODE_ETH_RX_DESTROY;
1145 subsys = CMD_SUBSYSTEM_COMMON;
1146 opcode = OPCODE_COMMON_MCC_DESTROY;
1152 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1154 req->id = cpu_to_le16(q->id);
1156 status = be_mbox_notify_wait(adapter);
1160 mutex_unlock(&adapter->mbox_lock);
1165 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1167 struct be_mcc_wrb *wrb;
1168 struct be_cmd_req_q_destroy *req;
1171 spin_lock_bh(&adapter->mcc_lock);
1173 wrb = wrb_from_mccq(adapter);
1178 req = embedded_payload(wrb);
1180 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1181 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1182 req->id = cpu_to_le16(q->id);
1184 status = be_mcc_notify_wait(adapter);
1189 spin_unlock_bh(&adapter->mcc_lock);
1193 /* Create an rx filtering policy configuration on an i/f; uses MCCQ */
1196 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1197 u32 *if_handle, u32 domain)
1199 struct be_mcc_wrb *wrb;
1200 struct be_cmd_req_if_create *req;
1203 spin_lock_bh(&adapter->mcc_lock);
1205 wrb = wrb_from_mccq(adapter);
1210 req = embedded_payload(wrb);
1212 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1213 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
1214 req->hdr.domain = domain;
1215 req->capability_flags = cpu_to_le32(cap_flags);
1216 req->enable_flags = cpu_to_le32(en_flags);
1218 req->pmac_invalid = true;
1220 status = be_mcc_notify_wait(adapter);
1222 struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
1223 *if_handle = le32_to_cpu(resp->interface_id);
1227 spin_unlock_bh(&adapter->mcc_lock);
1232 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1234 struct be_mcc_wrb *wrb;
1235 struct be_cmd_req_if_destroy *req;
1238 if (interface_id == -1)
1241 spin_lock_bh(&adapter->mcc_lock);
1243 wrb = wrb_from_mccq(adapter);
1248 req = embedded_payload(wrb);
1250 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1251 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
1252 req->hdr.domain = domain;
1253 req->interface_id = cpu_to_le32(interface_id);
1255 status = be_mcc_notify_wait(adapter);
1257 spin_unlock_bh(&adapter->mcc_lock);
1261 /* Get stats is a non-embedded command: the request is not embedded inside
1262 * the WRB but is carried in a separate DMA memory block.
1263 * Uses asynchronous MCC
1265 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1267 struct be_mcc_wrb *wrb;
1268 struct be_cmd_req_hdr *hdr;
1271 spin_lock_bh(&adapter->mcc_lock);
1273 wrb = wrb_from_mccq(adapter);
1278 hdr = nonemb_cmd->va;
1280 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1281 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
1283 if (adapter->generation == BE_GEN3)
1286 be_mcc_notify(adapter);
1287 adapter->stats_cmd_sent = true;
1290 spin_unlock_bh(&adapter->mcc_lock);
1295 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1296 struct be_dma_mem *nonemb_cmd)
1299 struct be_mcc_wrb *wrb;
1300 struct lancer_cmd_req_pport_stats *req;
1303 spin_lock_bh(&adapter->mcc_lock);
1305 wrb = wrb_from_mccq(adapter);
1310 req = nonemb_cmd->va;
1312 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1313 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
1316 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1317 req->cmd_params.params.reset_stats = 0;
1319 be_mcc_notify(adapter);
1320 adapter->stats_cmd_sent = true;
1323 spin_unlock_bh(&adapter->mcc_lock);
1327 /* Uses synchronous mcc */
1328 int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
1329 u16 *link_speed, u8 *link_status, u32 dom)
1331 struct be_mcc_wrb *wrb;
1332 struct be_cmd_req_link_status *req;
1335 spin_lock_bh(&adapter->mcc_lock);
1338 *link_status = LINK_DOWN;
1340 wrb = wrb_from_mccq(adapter);
1345 req = embedded_payload(wrb);
1347 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1348 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
1350 if (adapter->generation == BE_GEN3 || lancer_chip(adapter))
1351 req->hdr.version = 1;
1353 req->hdr.domain = dom;
1355 status = be_mcc_notify_wait(adapter);
1357 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1358 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
1360 *link_speed = le16_to_cpu(resp->link_speed);
1362 *mac_speed = resp->mac_speed;
1365 *link_status = resp->logical_link_status;
1369 spin_unlock_bh(&adapter->mcc_lock);
1373 /* Uses asynchronous mcc */
1374 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1376 struct be_mcc_wrb *wrb;
1377 struct be_cmd_req_get_cntl_addnl_attribs *req;
1380 spin_lock_bh(&adapter->mcc_lock);
1382 wrb = wrb_from_mccq(adapter);
1387 req = embedded_payload(wrb);
1389 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1390 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1393 be_mcc_notify(adapter);
1396 spin_unlock_bh(&adapter->mcc_lock);
1400 /* Uses synchronous mcc */
1401 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1403 struct be_mcc_wrb *wrb;
1404 struct be_cmd_req_get_fat *req;
1407 spin_lock_bh(&adapter->mcc_lock);
1409 wrb = wrb_from_mccq(adapter);
1414 req = embedded_payload(wrb);
1416 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1417 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
1418 req->fat_operation = cpu_to_le32(QUERY_FAT);
1419 status = be_mcc_notify_wait(adapter);
1421 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1422 if (log_size && resp->log_size)
1423 *log_size = le32_to_cpu(resp->log_size) -
1427 spin_unlock_bh(&adapter->mcc_lock);
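/* Retrieve the firmware FAT log into 'buf', reading it in chunks of at
 * most 60KB per RETRIEVE_FAT command through a temporary DMA buffer.
 */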
1431 void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1433 struct be_dma_mem get_fat_cmd;
1434 struct be_mcc_wrb *wrb;
1435 struct be_cmd_req_get_fat *req;
1436 u32 offset = 0, total_size, buf_size,
1437 log_offset = sizeof(u32), payload_len;
1443 total_size = buf_len;
1445 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1446 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1449 if (!get_fat_cmd.va) {
1451 dev_err(&adapter->pdev->dev,
1452 "Memory allocation failure while retrieving FAT data\n");
1456 spin_lock_bh(&adapter->mcc_lock);
1458 while (total_size) {
1459 buf_size = min(total_size, (u32)60*1024);
1460 total_size -= buf_size;
1462 wrb = wrb_from_mccq(adapter);
1467 req = get_fat_cmd.va;
1469 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1470 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1471 OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
1474 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1475 req->read_log_offset = cpu_to_le32(log_offset);
1476 req->read_log_length = cpu_to_le32(buf_size);
1477 req->data_buffer_size = cpu_to_le32(buf_size);
1479 status = be_mcc_notify_wait(adapter);
1481 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1482 memcpy(buf + offset,
1484 le32_to_cpu(resp->read_log_length));
1486 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1490 log_offset += buf_size;
1493 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1496 spin_unlock_bh(&adapter->mcc_lock);
1499 /* Uses synchronous mcc */
1500 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1503 struct be_mcc_wrb *wrb;
1504 struct be_cmd_req_get_fw_version *req;
1507 spin_lock_bh(&adapter->mcc_lock);
1509 wrb = wrb_from_mccq(adapter);
1515 req = embedded_payload(wrb);
1517 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1518 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
1519 status = be_mcc_notify_wait(adapter);
1521 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1522 strcpy(fw_ver, resp->firmware_version_string);
1524 strcpy(fw_on_flash, resp->fw_on_flash_version_string);
1527 spin_unlock_bh(&adapter->mcc_lock);
1531 /* Set the EQ delay interval of an EQ to the specified value; uses async mcc */
1534 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1536 struct be_mcc_wrb *wrb;
1537 struct be_cmd_req_modify_eq_delay *req;
1540 spin_lock_bh(&adapter->mcc_lock);
1542 wrb = wrb_from_mccq(adapter);
1547 req = embedded_payload(wrb);
1549 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1550 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
1552 req->num_eq = cpu_to_le32(1);
1553 req->delay[0].eq_id = cpu_to_le32(eq_id);
1554 req->delay[0].phase = 0;
1555 req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1557 be_mcc_notify(adapter);
1560 spin_unlock_bh(&adapter->mcc_lock);
1564 /* Uses synchronous mcc */
1565 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1566 u32 num, bool untagged, bool promiscuous)
1568 struct be_mcc_wrb *wrb;
1569 struct be_cmd_req_vlan_config *req;
1572 spin_lock_bh(&adapter->mcc_lock);
1574 wrb = wrb_from_mccq(adapter);
1579 req = embedded_payload(wrb);
1581 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1582 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
1584 req->interface_id = if_id;
1585 req->promiscuous = promiscuous;
1586 req->untagged = untagged;
1587 req->num_vlan = num;
1589 memcpy(req->normal_vlan, vtag_array,
1590 req->num_vlan * sizeof(vtag_array[0]));
1593 status = be_mcc_notify_wait(adapter);
1596 spin_unlock_bh(&adapter->mcc_lock);
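/* Program the interface RX filter: full promiscuous, multicast promiscuous,
 * or an explicit multicast MAC list copied from the netdev, depending on
 * the IFF_* flags passed in.
 */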
1600 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1602 struct be_mcc_wrb *wrb;
1603 struct be_dma_mem *mem = &adapter->rx_filter;
1604 struct be_cmd_req_rx_filter *req = mem->va;
1607 spin_lock_bh(&adapter->mcc_lock);
1609 wrb = wrb_from_mccq(adapter);
1614 memset(req, 0, sizeof(*req));
1615 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1616 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1619 req->if_id = cpu_to_le32(adapter->if_handle);
1620 if (flags & IFF_PROMISC) {
1621 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1622 BE_IF_FLAGS_VLAN_PROMISCUOUS);
1624 req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1625 BE_IF_FLAGS_VLAN_PROMISCUOUS);
1626 } else if (flags & IFF_ALLMULTI) {
1627 req->if_flags_mask = req->if_flags =
1628 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1630 struct netdev_hw_addr *ha;
1633 req->if_flags_mask = req->if_flags =
1634 cpu_to_le32(BE_IF_FLAGS_MULTICAST);
1636 /* Reset mcast promisc mode if already set by setting mask
1637 * and not setting flags field
1639 if (!lancer_chip(adapter) || be_physfn(adapter))
1640 req->if_flags_mask |=
1641 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1643 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1644 netdev_for_each_mc_addr(ha, adapter->netdev)
1645 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1648 status = be_mcc_notify_wait(adapter);
1650 spin_unlock_bh(&adapter->mcc_lock);
1654 /* Uses synchronous mcc */
1655 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1657 struct be_mcc_wrb *wrb;
1658 struct be_cmd_req_set_flow_control *req;
1661 spin_lock_bh(&adapter->mcc_lock);
1663 wrb = wrb_from_mccq(adapter);
1668 req = embedded_payload(wrb);
1670 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1671 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1673 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1674 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1676 status = be_mcc_notify_wait(adapter);
1679 spin_unlock_bh(&adapter->mcc_lock);
1684 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1686 struct be_mcc_wrb *wrb;
1687 struct be_cmd_req_get_flow_control *req;
1690 spin_lock_bh(&adapter->mcc_lock);
1692 wrb = wrb_from_mccq(adapter);
1697 req = embedded_payload(wrb);
1699 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1700 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1702 status = be_mcc_notify_wait(adapter);
1704 struct be_cmd_resp_get_flow_control *resp =
1705 embedded_payload(wrb);
1706 *tx_fc = le16_to_cpu(resp->tx_flow_control);
1707 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1711 spin_unlock_bh(&adapter->mcc_lock);
1716 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1717 u32 *mode, u32 *caps)
1719 struct be_mcc_wrb *wrb;
1720 struct be_cmd_req_query_fw_cfg *req;
1723 if (mutex_lock_interruptible(&adapter->mbox_lock))
1726 wrb = wrb_from_mbox(adapter);
1727 req = embedded_payload(wrb);
1729 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1730 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
1732 status = be_mbox_notify_wait(adapter);
1734 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1735 *port_num = le32_to_cpu(resp->phys_port);
1736 *mode = le32_to_cpu(resp->function_mode);
1737 *caps = le32_to_cpu(resp->function_caps);
1740 mutex_unlock(&adapter->mbox_lock);
1745 int be_cmd_reset_function(struct be_adapter *adapter)
1747 struct be_mcc_wrb *wrb;
1748 struct be_cmd_req_hdr *req;
1751 if (lancer_chip(adapter)) {
1752 status = lancer_wait_ready(adapter);
1754 iowrite32(SLI_PORT_CONTROL_IP_MASK,
1755 adapter->db + SLIPORT_CONTROL_OFFSET);
1756 status = lancer_test_and_set_rdy_state(adapter);
1759 dev_err(&adapter->pdev->dev,
1760 "Adapter in non recoverable error\n");
1765 if (mutex_lock_interruptible(&adapter->mbox_lock))
1768 wrb = wrb_from_mbox(adapter);
1769 req = embedded_payload(wrb);
1771 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1772 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
1774 status = be_mbox_notify_wait(adapter);
1776 mutex_unlock(&adapter->mbox_lock);
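/* Program receive-side scaling: enable TCP/IP v4/v6 hashing (plus UDP on
 * Lancer/Skyhawk), and hand the firmware the CPU indirection table and a
 * fixed hash key (myhash). Uses the mbox.
 */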
1780 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1782 struct be_mcc_wrb *wrb;
1783 struct be_cmd_req_rss_config *req;
1784 u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
1785 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
1786 0x3ea83c02, 0x4a110304};
1789 if (mutex_lock_interruptible(&adapter->mbox_lock))
1792 wrb = wrb_from_mbox(adapter);
1793 req = embedded_payload(wrb);
1795 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1796 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
1798 req->if_id = cpu_to_le32(adapter->if_handle);
1799 req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
1800 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);
1802 if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
1803 req->hdr.version = 1;
1804 req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
1805 RSS_ENABLE_UDP_IPV6);
1808 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1809 memcpy(req->cpu_table, rsstable, table_size);
1810 memcpy(req->hash, myhash, sizeof(myhash));
1811 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1813 status = be_mbox_notify_wait(adapter);
1815 mutex_unlock(&adapter->mbox_lock);
1820 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1821 u8 bcn, u8 sts, u8 state)
1823 struct be_mcc_wrb *wrb;
1824 struct be_cmd_req_enable_disable_beacon *req;
1827 spin_lock_bh(&adapter->mcc_lock);
1829 wrb = wrb_from_mccq(adapter);
1834 req = embedded_payload(wrb);
1836 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1837 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
1839 req->port_num = port_num;
1840 req->beacon_state = state;
1841 req->beacon_duration = bcn;
1842 req->status_duration = sts;
1844 status = be_mcc_notify_wait(adapter);
1847 spin_unlock_bh(&adapter->mcc_lock);
1852 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1854 struct be_mcc_wrb *wrb;
1855 struct be_cmd_req_get_beacon_state *req;
1858 spin_lock_bh(&adapter->mcc_lock);
1860 wrb = wrb_from_mccq(adapter);
1865 req = embedded_payload(wrb);
1867 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1868 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
1870 req->port_num = port_num;
1872 status = be_mcc_notify_wait(adapter);
1874 struct be_cmd_resp_get_beacon_state *resp =
1875 embedded_payload(wrb);
1876 *state = resp->beacon_state;
1880 spin_unlock_bh(&adapter->mcc_lock);
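/* Lancer flash write: the data to be written follows the request header in
 * the same DMA buffer (addr_low/high point just past the request), and
 * completion is reported asynchronously via adapter->flash_compl.
 */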
1884 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1885 u32 data_size, u32 data_offset,
1886 const char *obj_name, u32 *data_written,
1887 u8 *change_status, u8 *addn_status)
1889 struct be_mcc_wrb *wrb;
1890 struct lancer_cmd_req_write_object *req;
1891 struct lancer_cmd_resp_write_object *resp;
1895 spin_lock_bh(&adapter->mcc_lock);
1896 adapter->flash_status = 0;
1898 wrb = wrb_from_mccq(adapter);
1904 req = embedded_payload(wrb);
1906 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1907 OPCODE_COMMON_WRITE_OBJECT,
1908 sizeof(struct lancer_cmd_req_write_object), wrb,
1911 ctxt = &req->context;
1912 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1913 write_length, ctxt, data_size);
1916 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1919 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1922 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1923 req->write_offset = cpu_to_le32(data_offset);
1924 strcpy(req->object_name, obj_name);
1925 req->descriptor_count = cpu_to_le32(1);
1926 req->buf_len = cpu_to_le32(data_size);
1927 req->addr_low = cpu_to_le32((cmd->dma +
1928 sizeof(struct lancer_cmd_req_write_object))
1930 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
1931 sizeof(struct lancer_cmd_req_write_object)));
1933 be_mcc_notify(adapter);
1934 spin_unlock_bh(&adapter->mcc_lock);
1936 if (!wait_for_completion_timeout(&adapter->flash_compl,
1937 msecs_to_jiffies(30000)))
1940 status = adapter->flash_status;
1942 resp = embedded_payload(wrb);
1944 *data_written = le32_to_cpu(resp->actual_write_len);
1945 *change_status = resp->change_status;
1947 *addn_status = resp->additional_status;
1953 spin_unlock_bh(&adapter->mcc_lock);
1957 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1958 u32 data_size, u32 data_offset, const char *obj_name,
1959 u32 *data_read, u32 *eof, u8 *addn_status)
1961 struct be_mcc_wrb *wrb;
1962 struct lancer_cmd_req_read_object *req;
1963 struct lancer_cmd_resp_read_object *resp;
1966 spin_lock_bh(&adapter->mcc_lock);
1968 wrb = wrb_from_mccq(adapter);
1974 req = embedded_payload(wrb);
1976 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1977 OPCODE_COMMON_READ_OBJECT,
1978 sizeof(struct lancer_cmd_req_read_object), wrb,
1981 req->desired_read_len = cpu_to_le32(data_size);
1982 req->read_offset = cpu_to_le32(data_offset);
1983 strcpy(req->object_name, obj_name);
1984 req->descriptor_count = cpu_to_le32(1);
1985 req->buf_len = cpu_to_le32(data_size);
1986 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
1987 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
1989 status = be_mcc_notify_wait(adapter);
1991 resp = embedded_payload(wrb);
1993 *data_read = le32_to_cpu(resp->actual_read_len);
1994 *eof = le32_to_cpu(resp->eof);
1996 *addn_status = resp->additional_status;
2000 spin_unlock_bh(&adapter->mcc_lock);
2004 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2005 u32 flash_type, u32 flash_opcode, u32 buf_size)
2007 struct be_mcc_wrb *wrb;
2008 struct be_cmd_write_flashrom *req;
2011 spin_lock_bh(&adapter->mcc_lock);
2012 adapter->flash_status = 0;
2014 wrb = wrb_from_mccq(adapter);
2021 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2022 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
2024 req->params.op_type = cpu_to_le32(flash_type);
2025 req->params.op_code = cpu_to_le32(flash_opcode);
2026 req->params.data_buf_size = cpu_to_le32(buf_size);
2028 be_mcc_notify(adapter);
2029 spin_unlock_bh(&adapter->mcc_lock);
2031 if (!wait_for_completion_timeout(&adapter->flash_compl,
2032 msecs_to_jiffies(40000)))
2035 status = adapter->flash_status;
2040 spin_unlock_bh(&adapter->mcc_lock);
2044 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2047 struct be_mcc_wrb *wrb;
2048 struct be_cmd_write_flashrom *req;
2051 spin_lock_bh(&adapter->mcc_lock);
2053 wrb = wrb_from_mccq(adapter);
2058 req = embedded_payload(wrb);
2060 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2061 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL);
2063 req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
2064 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2065 req->params.offset = cpu_to_le32(offset);
2066 req->params.data_buf_size = cpu_to_le32(0x4);
2068 status = be_mcc_notify_wait(adapter);
2070 memcpy(flashed_crc, req->params.data_buf, 4);
2073 spin_unlock_bh(&adapter->mcc_lock);
2077 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2078 struct be_dma_mem *nonemb_cmd)
2080 struct be_mcc_wrb *wrb;
2081 struct be_cmd_req_acpi_wol_magic_config *req;
2084 spin_lock_bh(&adapter->mcc_lock);
2086 wrb = wrb_from_mccq(adapter);
2091 req = nonemb_cmd->va;
2093 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2094 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
2096 memcpy(req->magic_mac, mac, ETH_ALEN);
2098 status = be_mcc_notify_wait(adapter);
2101 spin_unlock_bh(&adapter->mcc_lock);
2105 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2106 u8 loopback_type, u8 enable)
2108 struct be_mcc_wrb *wrb;
2109 struct be_cmd_req_set_lmode *req;
2112 spin_lock_bh(&adapter->mcc_lock);
2114 wrb = wrb_from_mccq(adapter);
2120 req = embedded_payload(wrb);
2122 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2123 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
2126 req->src_port = port_num;
2127 req->dest_port = port_num;
2128 req->loopback_type = loopback_type;
2129 req->loopback_state = enable;
2131 status = be_mcc_notify_wait(adapter);
2133 spin_unlock_bh(&adapter->mcc_lock);
2137 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2138 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
2140 struct be_mcc_wrb *wrb;
2141 struct be_cmd_req_loopback_test *req;
2144 spin_lock_bh(&adapter->mcc_lock);
2146 wrb = wrb_from_mccq(adapter);
2152 req = embedded_payload(wrb);
2154 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2155 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
2156 req->hdr.timeout = cpu_to_le32(4);
2158 req->pattern = cpu_to_le64(pattern);
2159 req->src_port = cpu_to_le32(port_num);
2160 req->dest_port = cpu_to_le32(port_num);
2161 req->pkt_size = cpu_to_le32(pkt_size);
2162 req->num_pkts = cpu_to_le32(num_pkts);
2163 req->loopback_type = cpu_to_le32(loopback_type);
2165 status = be_mcc_notify_wait(adapter);
2167 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
2168 status = le32_to_cpu(resp->status);
2172 spin_unlock_bh(&adapter->mcc_lock);
2176 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2177 u32 byte_cnt, struct be_dma_mem *cmd)
2179 struct be_mcc_wrb *wrb;
2180 struct be_cmd_req_ddrdma_test *req;
2184 spin_lock_bh(&adapter->mcc_lock);
2186 wrb = wrb_from_mccq(adapter);
2192 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2193 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
2195 req->pattern = cpu_to_le64(pattern);
2196 req->byte_count = cpu_to_le32(byte_cnt);
2197 for (i = 0; i < byte_cnt; i++) {
2198 req->snd_buff[i] = (u8)(pattern >> (j*8));
2204 status = be_mcc_notify_wait(adapter);
2207 struct be_cmd_resp_ddrdma_test *resp;
2209 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2216 spin_unlock_bh(&adapter->mcc_lock);
2220 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2221 struct be_dma_mem *nonemb_cmd)
2223 struct be_mcc_wrb *wrb;
2224 struct be_cmd_req_seeprom_read *req;
2228 spin_lock_bh(&adapter->mcc_lock);
2230 wrb = wrb_from_mccq(adapter);
2235 req = nonemb_cmd->va;
2236 sge = nonembedded_sgl(wrb);
2238 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2239 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2242 status = be_mcc_notify_wait(adapter);
2245 spin_unlock_bh(&adapter->mcc_lock);
2249 int be_cmd_get_phy_info(struct be_adapter *adapter)
2251 struct be_mcc_wrb *wrb;
2252 struct be_cmd_req_get_phy_info *req;
2253 struct be_dma_mem cmd;
2256 spin_lock_bh(&adapter->mcc_lock);
2258 wrb = wrb_from_mccq(adapter);
2263 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2264 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2267 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2274 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2275 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2278 status = be_mcc_notify_wait(adapter);
2280 struct be_phy_info *resp_phy_info =
2281 cmd.va + sizeof(struct be_cmd_req_hdr);
2282 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2283 adapter->phy.interface_type =
2284 le16_to_cpu(resp_phy_info->interface_type);
2285 adapter->phy.auto_speeds_supported =
2286 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2287 adapter->phy.fixed_speeds_supported =
2288 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2289 adapter->phy.misc_params =
2290 le32_to_cpu(resp_phy_info->misc_params);
2292 pci_free_consistent(adapter->pdev, cmd.size,
2295 spin_unlock_bh(&adapter->mcc_lock);
2299 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2301 struct be_mcc_wrb *wrb;
2302 struct be_cmd_req_set_qos *req;
2305 spin_lock_bh(&adapter->mcc_lock);
2307 wrb = wrb_from_mccq(adapter);
2313 req = embedded_payload(wrb);
2315 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2316 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2318 req->hdr.domain = domain;
2319 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2320 req->max_bps_nic = cpu_to_le32(bps);
2322 status = be_mcc_notify_wait(adapter);
2325 spin_unlock_bh(&adapter->mcc_lock);
2329 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2331 struct be_mcc_wrb *wrb;
2332 struct be_cmd_req_cntl_attribs *req;
2333 struct be_cmd_resp_cntl_attribs *resp;
2335 int payload_len = max(sizeof(*req), sizeof(*resp));
2336 struct mgmt_controller_attrib *attribs;
2337 struct be_dma_mem attribs_cmd;
2339 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2340 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2341 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2343 if (!attribs_cmd.va) {
2344 dev_err(&adapter->pdev->dev,
2345 "Memory allocation failure\n");
2349 if (mutex_lock_interruptible(&adapter->mbox_lock))
2352 wrb = wrb_from_mbox(adapter);
2357 req = attribs_cmd.va;
2359 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2360 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
2363 status = be_mbox_notify_wait(adapter);
2365 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2366 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2370 mutex_unlock(&adapter->mbox_lock);
2371 pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
2377 int be_cmd_req_native_mode(struct be_adapter *adapter)
2379 struct be_mcc_wrb *wrb;
2380 struct be_cmd_req_set_func_cap *req;
2383 if (mutex_lock_interruptible(&adapter->mbox_lock))
2386 wrb = wrb_from_mbox(adapter);
2392 req = embedded_payload(wrb);
2394 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2395 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
2397 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2398 CAPABILITY_BE3_NATIVE_ERX_API);
2399 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2401 status = be_mbox_notify_wait(adapter);
2403 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2404 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2405 CAPABILITY_BE3_NATIVE_ERX_API;
2408 mutex_unlock(&adapter->mbox_lock);
2412 /* Uses synchronous MCCQ */
2413 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2414 bool *pmac_id_active, u32 *pmac_id, u8 domain)
2416 struct be_mcc_wrb *wrb;
2417 struct be_cmd_req_get_mac_list *req;
2420 struct be_dma_mem get_mac_list_cmd;
2423 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2424 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2425 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
2426 get_mac_list_cmd.size,
2427 &get_mac_list_cmd.dma);
2429 if (!get_mac_list_cmd.va) {
2430 dev_err(&adapter->pdev->dev,
2431 "Memory allocation failure during GET_MAC_LIST\n");
2435 spin_lock_bh(&adapter->mcc_lock);
2437 wrb = wrb_from_mccq(adapter);
2443 req = get_mac_list_cmd.va;
2445 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2446 OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
2447 wrb, &get_mac_list_cmd);
2449 req->hdr.domain = domain;
2450 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
2451 req->perm_override = 1;
2453 status = be_mcc_notify_wait(adapter);
2455 struct be_cmd_resp_get_mac_list *resp =
2456 get_mac_list_cmd.va;
2457 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
2458 /* Mac list returned could contain one or more active mac_ids
2459 * or one or more true or pseudo permanent mac addresses.
2460 * If an active mac_id is present, return the first active mac_id
2463 for (i = 0; i < mac_count; i++) {
2464 struct get_list_macaddr *mac_entry;
2468 mac_entry = &resp->macaddr_list[i];
2469 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
2470 /* mac_id is a 32 bit value and mac_addr size is 6 bytes */
2473 if (mac_addr_size == sizeof(u32)) {
2474 *pmac_id_active = true;
2475 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
2476 *pmac_id = le32_to_cpu(mac_id);
2480 /* If no active mac_id found, return first mac addr */
2481 *pmac_id_active = false;
2482 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
2487 spin_unlock_bh(&adapter->mcc_lock);
2488 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
2489 get_mac_list_cmd.va, get_mac_list_cmd.dma);
2493 /* Uses synchronous MCCQ */
2494 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2495 u8 mac_count, u32 domain)
2497 struct be_mcc_wrb *wrb;
2498 struct be_cmd_req_set_mac_list *req;
2500 struct be_dma_mem cmd;
2502 memset(&cmd, 0, sizeof(struct be_dma_mem));
2503 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2504 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2505 &cmd.dma, GFP_KERNEL);
2507 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2511 spin_lock_bh(&adapter->mcc_lock);
2513 wrb = wrb_from_mccq(adapter);
2520 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2521 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2524 req->hdr.domain = domain;
2525 req->mac_count = mac_count;
2527 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
2529 status = be_mcc_notify_wait(adapter);
2532 dma_free_coherent(&adapter->pdev->dev, cmd.size,
2534 spin_unlock_bh(&adapter->mcc_lock);
2538 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
2539 u32 domain, u16 intf_id)
2541 struct be_mcc_wrb *wrb;
2542 struct be_cmd_req_set_hsw_config *req;
2546 spin_lock_bh(&adapter->mcc_lock);
2548 wrb = wrb_from_mccq(adapter);
2554 req = embedded_payload(wrb);
2555 ctxt = &req->context;
2557 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2558 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2560 req->hdr.domain = domain;
2561 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
2563 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
2564 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
2567 be_dws_cpu_to_le(req->context, sizeof(req->context));
2568 status = be_mcc_notify_wait(adapter);
2571 spin_unlock_bh(&adapter->mcc_lock);
2575 /* Get Hyper switch config */
2576 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
2577 u32 domain, u16 intf_id)
2579 struct be_mcc_wrb *wrb;
2580 struct be_cmd_req_get_hsw_config *req;
2585 spin_lock_bh(&adapter->mcc_lock);
2587 wrb = wrb_from_mccq(adapter);
2593 req = embedded_payload(wrb);
2594 ctxt = &req->context;
2596 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2597 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2599 req->hdr.domain = domain;
2600 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
2602 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
2603 be_dws_cpu_to_le(req->context, sizeof(req->context));
2605 status = be_mcc_notify_wait(adapter);
2607 struct be_cmd_resp_get_hsw_config *resp =
2608 embedded_payload(wrb);
2609 be_dws_le_to_cpu(&resp->context,
2610 sizeof(resp->context));
2611 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
2612 pvid, &resp->context);
2613 *pvid = le16_to_cpu(vid);
2617 spin_unlock_bh(&adapter->mcc_lock);
2621 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2623 struct be_mcc_wrb *wrb;
2624 struct be_cmd_req_acpi_wol_magic_config_v1 *req;
2626 int payload_len = sizeof(*req);
2627 struct be_dma_mem cmd;
2629 memset(&cmd, 0, sizeof(struct be_dma_mem));
2630 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
2631 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2634 dev_err(&adapter->pdev->dev,
2635 "Memory allocation failure\n");
2639 if (mutex_lock_interruptible(&adapter->mbox_lock))
2642 wrb = wrb_from_mbox(adapter);
2650 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2651 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2652 payload_len, wrb, &cmd);
2654 req->hdr.version = 1;
2655 req->query_options = BE_GET_WOL_CAP;
2657 status = be_mbox_notify_wait(adapter);
2659 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
2660 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
2662 /* the command could succeed misleadingly on old f/w
2663 * which is not aware of the V1 version. fake an error. */
2664 if (resp->hdr.response_length < payload_len) {
2668 adapter->wol_cap = resp->wol_settings;
2671 mutex_unlock(&adapter->mbox_lock);
2672 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2676 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
2677 struct be_dma_mem *cmd)
2679 struct be_mcc_wrb *wrb;
2680 struct be_cmd_req_get_ext_fat_caps *req;
2683 if (mutex_lock_interruptible(&adapter->mbox_lock))
2686 wrb = wrb_from_mbox(adapter);
2693 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2694 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
2695 cmd->size, wrb, cmd);
2696 req->parameter_type = cpu_to_le32(1);
2698 status = be_mbox_notify_wait(adapter);
2700 mutex_unlock(&adapter->mbox_lock);
2704 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
2705 struct be_dma_mem *cmd,
2706 struct be_fat_conf_params *configs)
2708 struct be_mcc_wrb *wrb;
2709 struct be_cmd_req_set_ext_fat_caps *req;
2712 spin_lock_bh(&adapter->mcc_lock);
2714 wrb = wrb_from_mccq(adapter);
2721 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
2722 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2723 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
2724 cmd->size, wrb, cmd);
2726 status = be_mcc_notify_wait(adapter);
2728 spin_unlock_bh(&adapter->mcc_lock);
2732 int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
2734 struct be_mcc_wrb *wrb;
2735 struct be_cmd_req_get_port_name *req;
2738 if (!lancer_chip(adapter)) {
2739 *port_name = adapter->hba_port_num + '0';
2743 spin_lock_bh(&adapter->mcc_lock);
2745 wrb = wrb_from_mccq(adapter);
2751 req = embedded_payload(wrb);
2753 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2754 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
2756 req->hdr.version = 1;
2758 status = be_mcc_notify_wait(adapter);
2760 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
2761 *port_name = resp->port_name[adapter->hba_port_num];
2763 *port_name = adapter->hba_port_num + '0';
2766 spin_unlock_bh(&adapter->mcc_lock);
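/* Pass-through for the RoCE driver: the caller-supplied WRB payload is
 * copied into an embedded MCC WRB, issued synchronously, and the response
 * (including status and extended status) is copied back to the caller.
 */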
2770 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
2771 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
2773 struct be_adapter *adapter = netdev_priv(netdev_handle);
2774 struct be_mcc_wrb *wrb;
2775 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
2776 struct be_cmd_req_hdr *req;
2777 struct be_cmd_resp_hdr *resp;
2780 spin_lock_bh(&adapter->mcc_lock);
2782 wrb = wrb_from_mccq(adapter);
2787 req = embedded_payload(wrb);
2788 resp = embedded_payload(wrb);
2790 be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
2791 hdr->opcode, wrb_payload_size, wrb, NULL);
2792 memcpy(req, wrb_payload, wrb_payload_size);
2793 be_dws_cpu_to_le(req, wrb_payload_size);
2795 status = be_mcc_notify_wait(adapter);
2797 *cmd_status = (status & 0xffff);
2800 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
2801 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
2803 spin_unlock_bh(&adapter->mcc_lock);
2806 EXPORT_SYMBOL(be_roce_mcc_cmd);