/*
 *  FUJITSU Extended Socket Network Device driver
 *  Copyright (c) 2015 FUJITSU LIMITED
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
25 static void fjes_hw_update_zone_task(struct work_struct *);
26 static void fjes_hw_epstop_task(struct work_struct *);
28 /* supported MTU list */
29 const u32 fjes_support_mtu[] = {
30 FJES_MTU_DEFINE(8 * 1024),
31 FJES_MTU_DEFINE(16 * 1024),
32 FJES_MTU_DEFINE(32 * 1024),
33 FJES_MTU_DEFINE(64 * 1024),
37 u32 fjes_hw_rd32(struct fjes_hw *hw, u32 reg)
42 value = readl(&base[reg]);
47 static u8 *fjes_hw_iomap(struct fjes_hw *hw)
51 if (!request_mem_region(hw->hw_res.start, hw->hw_res.size,
53 pr_err("request_mem_region failed\n");
57 base = (u8 *)ioremap_nocache(hw->hw_res.start, hw->hw_res.size);
62 static void fjes_hw_iounmap(struct fjes_hw *hw)
65 release_mem_region(hw->hw_res.start, hw->hw_res.size);
68 int fjes_hw_reset(struct fjes_hw *hw)
75 wr32(XSCT_DCTL, dctl.reg);
77 timeout = FJES_DEVICE_RESET_TIMEOUT * 1000;
78 dctl.reg = rd32(XSCT_DCTL);
79 while ((dctl.bits.reset == 1) && (timeout > 0)) {
81 dctl.reg = rd32(XSCT_DCTL);
85 return timeout > 0 ? 0 : -EIO;
88 static int fjes_hw_get_max_epid(struct fjes_hw *hw)
90 union REG_MAX_EP info;
92 info.reg = rd32(XSCT_MAX_EP);
94 return info.bits.maxep;
97 static int fjes_hw_get_my_epid(struct fjes_hw *hw)
99 union REG_OWNER_EPID info;
101 info.reg = rd32(XSCT_OWNER_EPID);
103 return info.bits.epid;
106 static int fjes_hw_alloc_shared_status_region(struct fjes_hw *hw)
110 size = sizeof(struct fjes_device_shared_info) +
111 (sizeof(u8) * hw->max_epid);
112 hw->hw_info.share = kzalloc(size, GFP_KERNEL);
113 if (!hw->hw_info.share)
116 hw->hw_info.share->epnum = hw->max_epid;
121 static void fjes_hw_free_shared_status_region(struct fjes_hw *hw)
123 kfree(hw->hw_info.share);
124 hw->hw_info.share = NULL;
127 static int fjes_hw_alloc_epbuf(struct epbuf_handler *epbh)
131 mem = vzalloc(EP_BUFFER_SIZE);
136 epbh->size = EP_BUFFER_SIZE;
138 epbh->info = (union ep_buffer_info *)mem;
139 epbh->ring = (u8 *)(mem + sizeof(union ep_buffer_info));
144 static void fjes_hw_free_epbuf(struct epbuf_handler *epbh)
156 void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu)
158 union ep_buffer_info *info = epbh->info;
159 u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX];
162 for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
163 vlan_id[i] = info->v1i.vlan_id[i];
165 memset(info, 0, sizeof(union ep_buffer_info));
167 info->v1i.version = 0; /* version 0 */
169 for (i = 0; i < ETH_ALEN; i++)
170 info->v1i.mac_addr[i] = mac_addr[i];
175 info->v1i.info_size = sizeof(union ep_buffer_info);
176 info->v1i.buffer_size = epbh->size - info->v1i.info_size;
178 info->v1i.frame_max = FJES_MTU_TO_FRAME_SIZE(mtu);
179 info->v1i.count_max =
180 EP_RING_NUM(info->v1i.buffer_size, info->v1i.frame_max);
182 for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
183 info->v1i.vlan_id[i] = vlan_id[i];
187 fjes_hw_init_command_registers(struct fjes_hw *hw,
188 struct fjes_device_command_param *param)
190 /* Request Buffer length */
191 wr32(XSCT_REQBL, (__le32)(param->req_len));
192 /* Response Buffer Length */
193 wr32(XSCT_RESPBL, (__le32)(param->res_len));
195 /* Request Buffer Address */
197 (__le32)(param->req_start & GENMASK_ULL(31, 0)));
199 (__le32)((param->req_start & GENMASK_ULL(63, 32)) >> 32));
201 /* Response Buffer Address */
203 (__le32)(param->res_start & GENMASK_ULL(31, 0)));
205 (__le32)((param->res_start & GENMASK_ULL(63, 32)) >> 32));
207 /* Share status address */
209 (__le32)(param->share_start & GENMASK_ULL(31, 0)));
211 (__le32)((param->share_start & GENMASK_ULL(63, 32)) >> 32));
214 static int fjes_hw_setup(struct fjes_hw *hw)
216 u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
217 struct fjes_device_command_param param;
218 struct ep_share_mem_info *buf_pair;
224 hw->hw_info.max_epid = &hw->max_epid;
225 hw->hw_info.my_epid = &hw->my_epid;
227 buf = kcalloc(hw->max_epid, sizeof(struct ep_share_mem_info),
232 hw->ep_shm_info = (struct ep_share_mem_info *)buf;
234 mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
235 hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
236 if (!(hw->hw_info.req_buf))
239 hw->hw_info.req_buf_size = mem_size;
241 mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
242 hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
243 if (!(hw->hw_info.res_buf))
246 hw->hw_info.res_buf_size = mem_size;
248 result = fjes_hw_alloc_shared_status_region(hw);
252 hw->hw_info.buffer_share_bit = 0;
253 hw->hw_info.buffer_unshare_reserve_bit = 0;
255 for (epidx = 0; epidx < hw->max_epid; epidx++) {
256 if (epidx != hw->my_epid) {
257 buf_pair = &hw->ep_shm_info[epidx];
259 result = fjes_hw_alloc_epbuf(&buf_pair->tx);
263 result = fjes_hw_alloc_epbuf(&buf_pair->rx);
267 fjes_hw_setup_epbuf(&buf_pair->tx, mac,
268 fjes_support_mtu[0]);
269 fjes_hw_setup_epbuf(&buf_pair->rx, mac,
270 fjes_support_mtu[0]);
274 memset(¶m, 0, sizeof(param));
276 param.req_len = hw->hw_info.req_buf_size;
277 param.req_start = __pa(hw->hw_info.req_buf);
278 param.res_len = hw->hw_info.res_buf_size;
279 param.res_start = __pa(hw->hw_info.res_buf);
281 param.share_start = __pa(hw->hw_info.share->ep_status);
283 fjes_hw_init_command_registers(hw, ¶m);
288 static void fjes_hw_cleanup(struct fjes_hw *hw)
292 if (!hw->ep_shm_info)
295 fjes_hw_free_shared_status_region(hw);
297 kfree(hw->hw_info.req_buf);
298 hw->hw_info.req_buf = NULL;
300 kfree(hw->hw_info.res_buf);
301 hw->hw_info.res_buf = NULL;
303 for (epidx = 0; epidx < hw->max_epid ; epidx++) {
304 if (epidx == hw->my_epid)
306 fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
307 fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
310 kfree(hw->ep_shm_info);
311 hw->ep_shm_info = NULL;
314 int fjes_hw_init(struct fjes_hw *hw)
318 hw->base = fjes_hw_iomap(hw);
322 ret = fjes_hw_reset(hw);
326 fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
328 INIT_WORK(&hw->update_zone_task, fjes_hw_update_zone_task);
329 INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task);
331 mutex_init(&hw->hw_info.lock);
333 hw->max_epid = fjes_hw_get_max_epid(hw);
334 hw->my_epid = fjes_hw_get_my_epid(hw);
336 if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid))
339 ret = fjes_hw_setup(hw);
344 void fjes_hw_exit(struct fjes_hw *hw)
349 ret = fjes_hw_reset(hw);
351 pr_err("%s: reset error", __func__);
359 cancel_work_sync(&hw->update_zone_task);
360 cancel_work_sync(&hw->epstop_task);
363 static enum fjes_dev_command_response_e
364 fjes_hw_issue_request_command(struct fjes_hw *hw,
365 enum fjes_dev_command_request_type type)
367 enum fjes_dev_command_response_e ret = FJES_CMD_STATUS_UNKNOWN;
373 cr.bits.req_start = 1;
374 cr.bits.req_code = type;
375 wr32(XSCT_CR, cr.reg);
376 cr.reg = rd32(XSCT_CR);
378 if (cr.bits.error == 0) {
379 timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;
380 cs.reg = rd32(XSCT_CS);
382 while ((cs.bits.complete != 1) && timeout > 0) {
384 cs.reg = rd32(XSCT_CS);
388 if (cs.bits.complete == 1)
389 ret = FJES_CMD_STATUS_NORMAL;
390 else if (timeout <= 0)
391 ret = FJES_CMD_STATUS_TIMEOUT;
394 switch (cr.bits.err_info) {
395 case FJES_CMD_REQ_ERR_INFO_PARAM:
396 ret = FJES_CMD_STATUS_ERROR_PARAM;
398 case FJES_CMD_REQ_ERR_INFO_STATUS:
399 ret = FJES_CMD_STATUS_ERROR_STATUS;
402 ret = FJES_CMD_STATUS_UNKNOWN;
410 int fjes_hw_request_info(struct fjes_hw *hw)
412 union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
413 union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
414 enum fjes_dev_command_response_e ret;
417 memset(req_buf, 0, hw->hw_info.req_buf_size);
418 memset(res_buf, 0, hw->hw_info.res_buf_size);
420 req_buf->info.length = FJES_DEV_COMMAND_INFO_REQ_LEN;
422 res_buf->info.length = 0;
423 res_buf->info.code = 0;
425 ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_INFO);
429 if (FJES_DEV_COMMAND_INFO_RES_LEN((*hw->hw_info.max_epid)) !=
430 res_buf->info.length) {
432 } else if (ret == FJES_CMD_STATUS_NORMAL) {
433 switch (res_buf->info.code) {
434 case FJES_CMD_REQ_RES_CODE_NORMAL:
443 case FJES_CMD_STATUS_UNKNOWN:
446 case FJES_CMD_STATUS_TIMEOUT:
449 case FJES_CMD_STATUS_ERROR_PARAM:
452 case FJES_CMD_STATUS_ERROR_STATUS:
464 int fjes_hw_register_buff_addr(struct fjes_hw *hw, int dest_epid,
465 struct ep_share_mem_info *buf_pair)
467 union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
468 union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
469 enum fjes_dev_command_response_e ret;
476 if (test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
479 memset(req_buf, 0, hw->hw_info.req_buf_size);
480 memset(res_buf, 0, hw->hw_info.res_buf_size);
482 req_buf->share_buffer.length = FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(
485 req_buf->share_buffer.epid = dest_epid;
488 req_buf->share_buffer.buffer[idx++] = buf_pair->tx.size;
489 page_count = buf_pair->tx.size / EP_BUFFER_INFO_SIZE;
490 for (i = 0; i < page_count; i++) {
491 addr = ((u8 *)(buf_pair->tx.buffer)) +
492 (i * EP_BUFFER_INFO_SIZE);
493 req_buf->share_buffer.buffer[idx++] =
494 (__le64)(page_to_phys(vmalloc_to_page(addr)) +
495 offset_in_page(addr));
498 req_buf->share_buffer.buffer[idx++] = buf_pair->rx.size;
499 page_count = buf_pair->rx.size / EP_BUFFER_INFO_SIZE;
500 for (i = 0; i < page_count; i++) {
501 addr = ((u8 *)(buf_pair->rx.buffer)) +
502 (i * EP_BUFFER_INFO_SIZE);
503 req_buf->share_buffer.buffer[idx++] =
504 (__le64)(page_to_phys(vmalloc_to_page(addr)) +
505 offset_in_page(addr));
508 res_buf->share_buffer.length = 0;
509 res_buf->share_buffer.code = 0;
511 ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_SHARE_BUFFER);
513 timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
514 while ((ret == FJES_CMD_STATUS_NORMAL) &&
515 (res_buf->share_buffer.length ==
516 FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) &&
517 (res_buf->share_buffer.code == FJES_CMD_REQ_RES_CODE_BUSY) &&
519 msleep(200 + hw->my_epid * 20);
520 timeout -= (200 + hw->my_epid * 20);
522 res_buf->share_buffer.length = 0;
523 res_buf->share_buffer.code = 0;
525 ret = fjes_hw_issue_request_command(
526 hw, FJES_CMD_REQ_SHARE_BUFFER);
531 if (res_buf->share_buffer.length !=
532 FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN)
534 else if (ret == FJES_CMD_STATUS_NORMAL) {
535 switch (res_buf->share_buffer.code) {
536 case FJES_CMD_REQ_RES_CODE_NORMAL:
538 set_bit(dest_epid, &hw->hw_info.buffer_share_bit);
540 case FJES_CMD_REQ_RES_CODE_BUSY:
549 case FJES_CMD_STATUS_UNKNOWN:
552 case FJES_CMD_STATUS_TIMEOUT:
555 case FJES_CMD_STATUS_ERROR_PARAM:
556 case FJES_CMD_STATUS_ERROR_STATUS:
566 int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
568 union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
569 union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
570 struct fjes_device_shared_info *share = hw->hw_info.share;
571 enum fjes_dev_command_response_e ret;
578 if (!req_buf || !res_buf || !share)
581 if (!test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
584 memset(req_buf, 0, hw->hw_info.req_buf_size);
585 memset(res_buf, 0, hw->hw_info.res_buf_size);
587 req_buf->unshare_buffer.length =
588 FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN;
589 req_buf->unshare_buffer.epid = dest_epid;
591 res_buf->unshare_buffer.length = 0;
592 res_buf->unshare_buffer.code = 0;
594 ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
596 timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
597 while ((ret == FJES_CMD_STATUS_NORMAL) &&
598 (res_buf->unshare_buffer.length ==
599 FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) &&
600 (res_buf->unshare_buffer.code ==
601 FJES_CMD_REQ_RES_CODE_BUSY) &&
603 msleep(200 + hw->my_epid * 20);
604 timeout -= (200 + hw->my_epid * 20);
606 res_buf->unshare_buffer.length = 0;
607 res_buf->unshare_buffer.code = 0;
610 fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
615 if (res_buf->unshare_buffer.length !=
616 FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) {
618 } else if (ret == FJES_CMD_STATUS_NORMAL) {
619 switch (res_buf->unshare_buffer.code) {
620 case FJES_CMD_REQ_RES_CODE_NORMAL:
622 clear_bit(dest_epid, &hw->hw_info.buffer_share_bit);
624 case FJES_CMD_REQ_RES_CODE_BUSY:
633 case FJES_CMD_STATUS_UNKNOWN:
636 case FJES_CMD_STATUS_TIMEOUT:
639 case FJES_CMD_STATUS_ERROR_PARAM:
640 case FJES_CMD_STATUS_ERROR_STATUS:
650 int fjes_hw_raise_interrupt(struct fjes_hw *hw, int dest_epid,
651 enum REG_ICTL_MASK mask)
653 u32 ig = mask | dest_epid;
655 wr32(XSCT_IG, cpu_to_le32(ig));
660 u32 fjes_hw_capture_interrupt_status(struct fjes_hw *hw)
664 cur_is = rd32(XSCT_IS);
669 void fjes_hw_set_irqmask(struct fjes_hw *hw,
670 enum REG_ICTL_MASK intr_mask, bool mask)
673 wr32(XSCT_IMS, intr_mask);
675 wr32(XSCT_IMC, intr_mask);
678 bool fjes_hw_epid_is_same_zone(struct fjes_hw *hw, int epid)
680 if (epid >= hw->max_epid)
683 if ((hw->ep_shm_info[epid].es_status !=
684 FJES_ZONING_STATUS_ENABLE) ||
685 (hw->ep_shm_info[hw->my_epid].zone ==
686 FJES_ZONING_ZONE_TYPE_NONE))
689 return (hw->ep_shm_info[epid].zone ==
690 hw->ep_shm_info[hw->my_epid].zone);
693 int fjes_hw_epid_is_shared(struct fjes_device_shared_info *share,
698 if (dest_epid < share->epnum)
699 value = share->ep_status[dest_epid];
704 static bool fjes_hw_epid_is_stop_requested(struct fjes_hw *hw, int src_epid)
706 return test_bit(src_epid, &hw->txrx_stop_req_bit);
709 static bool fjes_hw_epid_is_stop_process_done(struct fjes_hw *hw, int src_epid)
711 return (hw->ep_shm_info[src_epid].tx.info->v1i.rx_status &
712 FJES_RX_STOP_REQ_DONE);
715 enum ep_partner_status
716 fjes_hw_get_partner_ep_status(struct fjes_hw *hw, int epid)
718 enum ep_partner_status status;
720 if (fjes_hw_epid_is_shared(hw->hw_info.share, epid)) {
721 if (fjes_hw_epid_is_stop_requested(hw, epid)) {
722 status = EP_PARTNER_WAITING;
724 if (fjes_hw_epid_is_stop_process_done(hw, epid))
725 status = EP_PARTNER_COMPLETE;
727 status = EP_PARTNER_SHARED;
730 status = EP_PARTNER_UNSHARE;
736 void fjes_hw_raise_epstop(struct fjes_hw *hw)
738 enum ep_partner_status status;
741 for (epidx = 0; epidx < hw->max_epid; epidx++) {
742 if (epidx == hw->my_epid)
745 status = fjes_hw_get_partner_ep_status(hw, epidx);
747 case EP_PARTNER_SHARED:
748 fjes_hw_raise_interrupt(hw, epidx,
749 REG_ICTL_MASK_TXRX_STOP_REQ);
755 set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
756 set_bit(epidx, &hw->txrx_stop_req_bit);
758 hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
759 FJES_RX_STOP_REQ_REQUEST;
763 int fjes_hw_wait_epstop(struct fjes_hw *hw)
765 enum ep_partner_status status;
766 union ep_buffer_info *info;
770 while (hw->hw_info.buffer_unshare_reserve_bit &&
771 (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)) {
772 for (epidx = 0; epidx < hw->max_epid; epidx++) {
773 if (epidx == hw->my_epid)
775 status = fjes_hw_epid_is_shared(hw->hw_info.share,
777 info = hw->ep_shm_info[epidx].rx.info;
779 (info->v1i.rx_status &
780 FJES_RX_STOP_REQ_DONE)) &&
782 &hw->hw_info.buffer_unshare_reserve_bit)) {
784 &hw->hw_info.buffer_unshare_reserve_bit);
792 for (epidx = 0; epidx < hw->max_epid; epidx++) {
793 if (epidx == hw->my_epid)
795 if (test_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit))
797 &hw->hw_info.buffer_unshare_reserve_bit);
800 return (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)
804 bool fjes_hw_check_epbuf_version(struct epbuf_handler *epbh, u32 version)
806 union ep_buffer_info *info = epbh->info;
808 return (info->common.version == version);
811 bool fjes_hw_check_mtu(struct epbuf_handler *epbh, u32 mtu)
813 union ep_buffer_info *info = epbh->info;
815 return (info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu));
818 bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
820 union ep_buffer_info *info = epbh->info;
827 for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
828 if (vlan_id == info->v1i.vlan_id[i]) {
837 bool fjes_hw_set_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
839 union ep_buffer_info *info = epbh->info;
842 for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
843 if (info->v1i.vlan_id[i] == 0) {
844 info->v1i.vlan_id[i] = vlan_id;
851 void fjes_hw_del_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
853 union ep_buffer_info *info = epbh->info;
857 for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
858 if (vlan_id == info->v1i.vlan_id[i])
859 info->v1i.vlan_id[i] = 0;
864 bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *epbh)
866 union ep_buffer_info *info = epbh->info;
868 if (info->v1i.count_max == 0)
871 return EP_RING_EMPTY(info->v1i.head, info->v1i.tail,
872 info->v1i.count_max);
875 void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *epbh,
878 union ep_buffer_info *info = epbh->info;
879 struct esmem_frame *ring_frame;
882 ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
884 info->v1i.count_max) *
885 info->v1i.frame_max]);
887 *psize = (size_t)ring_frame->frame_size;
889 frame = ring_frame->frame_data;
894 void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *epbh)
896 union ep_buffer_info *info = epbh->info;
898 if (fjes_hw_epbuf_rx_is_empty(epbh))
901 EP_RING_INDEX_INC(epbh->info->v1i.head, info->v1i.count_max);
904 int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *epbh,
905 void *frame, size_t size)
907 union ep_buffer_info *info = epbh->info;
908 struct esmem_frame *ring_frame;
910 if (EP_RING_FULL(info->v1i.head, info->v1i.tail, info->v1i.count_max))
913 ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
915 info->v1i.count_max) *
916 info->v1i.frame_max]);
918 ring_frame->frame_size = size;
919 memcpy((void *)(ring_frame->frame_data), (void *)frame, size);
921 EP_RING_INDEX_INC(epbh->info->v1i.tail, info->v1i.count_max);
926 static void fjes_hw_update_zone_task(struct work_struct *work)
928 struct fjes_hw *hw = container_of(work,
929 struct fjes_hw, update_zone_task);
931 struct my_s {u8 es_status; u8 zone; } *info;
932 union fjes_device_command_res *res_buf;
933 enum ep_partner_status pstatus;
935 struct fjes_adapter *adapter;
936 struct net_device *netdev;
938 ulong unshare_bit = 0;
945 adapter = (struct fjes_adapter *)hw->back;
946 netdev = adapter->netdev;
947 res_buf = hw->hw_info.res_buf;
948 info = (struct my_s *)&res_buf->info.info;
950 mutex_lock(&hw->hw_info.lock);
952 ret = fjes_hw_request_info(hw);
957 if (!work_pending(&adapter->force_close_task)) {
958 adapter->force_reset = true;
959 schedule_work(&adapter->force_close_task);
965 for (epidx = 0; epidx < hw->max_epid; epidx++) {
966 if (epidx == hw->my_epid) {
967 hw->ep_shm_info[epidx].es_status =
968 info[epidx].es_status;
969 hw->ep_shm_info[epidx].zone =
974 pstatus = fjes_hw_get_partner_ep_status(hw, epidx);
976 case EP_PARTNER_UNSHARE:
978 if ((info[epidx].zone !=
979 FJES_ZONING_ZONE_TYPE_NONE) &&
980 (info[epidx].es_status ==
981 FJES_ZONING_STATUS_ENABLE) &&
983 info[hw->my_epid].zone))
984 set_bit(epidx, &share_bit);
986 set_bit(epidx, &unshare_bit);
989 case EP_PARTNER_COMPLETE:
990 case EP_PARTNER_WAITING:
991 if ((info[epidx].zone ==
992 FJES_ZONING_ZONE_TYPE_NONE) ||
993 (info[epidx].es_status !=
994 FJES_ZONING_STATUS_ENABLE) ||
996 info[hw->my_epid].zone)) {
998 &adapter->unshare_watch_bitmask);
1000 &hw->hw_info.buffer_unshare_reserve_bit);
1004 case EP_PARTNER_SHARED:
1005 if ((info[epidx].zone ==
1006 FJES_ZONING_ZONE_TYPE_NONE) ||
1007 (info[epidx].es_status !=
1008 FJES_ZONING_STATUS_ENABLE) ||
1009 (info[epidx].zone !=
1010 info[hw->my_epid].zone))
1011 set_bit(epidx, &irq_bit);
1015 hw->ep_shm_info[epidx].es_status =
1016 info[epidx].es_status;
1017 hw->ep_shm_info[epidx].zone = info[epidx].zone;
1022 mutex_unlock(&hw->hw_info.lock);
1024 for (epidx = 0; epidx < hw->max_epid; epidx++) {
1025 if (epidx == hw->my_epid)
1028 if (test_bit(epidx, &share_bit)) {
1029 fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
1030 netdev->dev_addr, netdev->mtu);
1032 mutex_lock(&hw->hw_info.lock);
1034 ret = fjes_hw_register_buff_addr(
1035 hw, epidx, &hw->ep_shm_info[epidx]);
1043 if (!work_pending(&adapter->force_close_task)) {
1044 adapter->force_reset = true;
1046 &adapter->force_close_task);
1050 mutex_unlock(&hw->hw_info.lock);
1053 if (test_bit(epidx, &unshare_bit)) {
1054 mutex_lock(&hw->hw_info.lock);
1056 ret = fjes_hw_unregister_buff_addr(hw, epidx);
1064 if (!work_pending(&adapter->force_close_task)) {
1065 adapter->force_reset = true;
1067 &adapter->force_close_task);
1072 mutex_unlock(&hw->hw_info.lock);
1075 fjes_hw_setup_epbuf(
1076 &hw->ep_shm_info[epidx].tx,
1077 netdev->dev_addr, netdev->mtu);
1080 if (test_bit(epidx, &irq_bit)) {
1081 fjes_hw_raise_interrupt(hw, epidx,
1082 REG_ICTL_MASK_TXRX_STOP_REQ);
1084 set_bit(epidx, &hw->txrx_stop_req_bit);
1085 hw->ep_shm_info[epidx].tx.
1086 info->v1i.rx_status |=
1087 FJES_RX_STOP_REQ_REQUEST;
1088 set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
1092 if (irq_bit || adapter->unshare_watch_bitmask) {
1093 if (!work_pending(&adapter->unshare_watch_task))
1094 queue_work(adapter->control_wq,
1095 &adapter->unshare_watch_task);
1099 static void fjes_hw_epstop_task(struct work_struct *work)
1101 struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task);
1102 struct fjes_adapter *adapter = (struct fjes_adapter *)hw->back;
1107 while ((remain_bit = hw->epstop_req_bit)) {
1108 for (epid_bit = 0; remain_bit; remain_bit >>= 1, epid_bit++) {
1109 if (remain_bit & 1) {
1110 hw->ep_shm_info[epid_bit].
1111 tx.info->v1i.rx_status |=
1112 FJES_RX_STOP_REQ_DONE;
1114 clear_bit(epid_bit, &hw->epstop_req_bit);
1116 &adapter->unshare_watch_bitmask);
1118 if (!work_pending(&adapter->unshare_watch_task))
1120 adapter->control_wq,
1121 &adapter->unshare_watch_task);