/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Transport Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"
#define NTB_TRANSPORT_VERSION 4
#define NTB_TRANSPORT_VER "4"
#define NTB_TRANSPORT_NAME "ntb_transport"
#define NTB_TRANSPORT_DESC "Software Queue-Pair Transport over NTB"

MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int transport_mtu = 0x10000;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

static bool use_dma; /* declaration restored to match module_param() below */
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");
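
/*
 * Usage note (illustrative, not from the original source): with 0644
 * permissions these knobs can be set at load time, e.g.
 * "modprobe ntb_transport use_dma=1 copy_bytes=2048", or adjusted later
 * through /sys/module/ntb_transport/parameters/.
 */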
static struct dentry *nt_debugfs_dir;

struct ntb_queue_entry {
        /* ntb_queue list reference */
        struct list_head entry;
        /* pointers to data to be transferred */

        struct ntb_transport_qp *qp;
        struct ntb_payload_header __iomem *tx_hdr;
        struct ntb_payload_header *rx_hdr;
struct ntb_transport_qp {
        struct ntb_transport_ctx *transport;
        struct ntb_dev *ndev;

        struct dma_chan *dma_chan;
        u8 qp_num;      /* Only 64 QPs are allowed.  0-63 */
        struct ntb_rx_info __iomem *rx_info;
        struct ntb_rx_info *remote_rx_info;

        void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
                           void *data, int len);
        struct list_head tx_free_q;
        spinlock_t ntb_tx_free_q_lock;

        dma_addr_t tx_mw_phys;
        unsigned int tx_index;
        unsigned int tx_max_entry;
        unsigned int tx_max_frame;

        void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
                           void *data, int len);
        struct list_head rx_post_q;
        struct list_head rx_pend_q;
        struct list_head rx_free_q;
        /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
        spinlock_t ntb_rx_q_lock;

        unsigned int rx_index;
        unsigned int rx_max_entry;
        unsigned int rx_max_frame;
        dma_cookie_t last_cookie;
        struct tasklet_struct rxc_db_work;

        void (*event_handler)(void *data, int status);
        struct delayed_work link_work;
        struct work_struct link_cleanup;

        struct dentry *debugfs_dir;
        struct dentry *debugfs_stats;
struct ntb_transport_mw {
        phys_addr_t phys_addr;
        resource_size_t phys_size;
        resource_size_t xlat_align;
        resource_size_t xlat_align_size;

struct ntb_transport_client_dev {
        struct list_head entry;
        struct ntb_transport_ctx *nt;
struct ntb_transport_ctx {
        struct list_head entry;
        struct list_head client_devs;

        struct ntb_dev *ndev;

        struct ntb_transport_mw *mw_vec;
        struct ntb_transport_qp *qp_vec;
        unsigned int mw_count;
        unsigned int qp_count;

        struct delayed_work link_work;
        struct work_struct link_cleanup;

        struct dentry *debugfs_node_dir;

        DESC_DONE_FLAG = BIT(0),
        LINK_DOWN_FLAG = BIT(1),

struct ntb_payload_header {
#define dev_client_dev(__dev) \
        container_of((__dev), struct ntb_transport_client_dev, dev)

#define drv_client(__drv) \
        container_of((__drv), struct ntb_transport_client, driver)

#define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES 100
#define NTB_LINK_DOWN_TIMEOUT 10
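
/*
 * Worked example (illustrative, not from the original source): with
 * mw_count == 2 and qp_count == 5, QP_TO_MW() spreads queue pairs
 * round-robin across the memory windows:
 * qp0->mw0, qp1->mw1, qp2->mw0, qp3->mw1, qp4->mw0.
 */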
static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;

static int ntb_transport_bus_match(struct device *dev,
                                   struct device_driver *drv)
        return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
static int ntb_transport_bus_probe(struct device *dev)
        const struct ntb_transport_client *client;

        client = drv_client(dev->driver);
        rc = client->probe(dev);

static int ntb_transport_bus_remove(struct device *dev)
        const struct ntb_transport_client *client;

        client = drv_client(dev->driver);

static struct bus_type ntb_transport_bus = {
        .name = "ntb_transport",
        .match = ntb_transport_bus_match,
        .probe = ntb_transport_bus_probe,
        .remove = ntb_transport_bus_remove,
static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport_ctx *nt)
        list_add(&nt->entry, &ntb_transport_list);

static void ntb_bus_remove(struct ntb_transport_ctx *nt)
        struct ntb_transport_client_dev *client_dev, *cd;

        list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
                dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
                        dev_name(&client_dev->dev));
                list_del(&client_dev->entry);
                device_unregister(&client_dev->dev);

        list_del(&nt->entry);
static void ntb_transport_client_release(struct device *dev)
        struct ntb_transport_client_dev *client_dev;

        client_dev = dev_client_dev(dev);

/**
 * ntb_transport_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_transport_unregister_client_dev(char *device_name)
        struct ntb_transport_client_dev *client, *cd;
        struct ntb_transport_ctx *nt;

        list_for_each_entry(nt, &ntb_transport_list, entry)
                list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
                        if (!strncmp(dev_name(&client->dev), device_name,
                                     strlen(device_name))) {
                                list_del(&client->entry);
                                device_unregister(&client->dev);
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);
/**
 * ntb_transport_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_transport_register_client_dev(char *device_name)
        struct ntb_transport_client_dev *client_dev;
        struct ntb_transport_ctx *nt;

        if (list_empty(&ntb_transport_list))

        list_for_each_entry(nt, &ntb_transport_list, entry) {
                node = dev_to_node(&nt->ndev->dev);

                client_dev = kzalloc_node(sizeof(*client_dev),

                dev = &client_dev->dev;

                /* set up and register client devices */
                dev_set_name(dev, "%s%d", device_name, i);
                dev->bus = &ntb_transport_bus;
                dev->release = ntb_transport_client_release;
                dev->parent = &nt->ndev->dev;

                rc = device_register(dev);

                list_add_tail(&client_dev->entry, &nt->client_devs);

        ntb_transport_unregister_client_dev(device_name);

EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);
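
/*
 * Hypothetical usage sketch (not part of the original file; the "ntb_demo"
 * name is invented): a client module typically registers its device name
 * once at init time, which creates one "ntb_demoN" device on the
 * ntb_transport bus per transport in ntb_transport_list, and unregisters
 * it on exit.
 */
static int __init ntb_demo_dev_init(void)
{
        return ntb_transport_register_client_dev("ntb_demo");
}

static void __exit ntb_demo_dev_exit(void)
{
        ntb_transport_unregister_client_dev("ntb_demo");
}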
/**
 * ntb_transport_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_register_client(struct ntb_transport_client *drv)
        drv->driver.bus = &ntb_transport_bus;

        if (list_empty(&ntb_transport_list))

        return driver_register(&drv->driver);
EXPORT_SYMBOL_GPL(ntb_transport_register_client);

/**
 * ntb_transport_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
        driver_unregister(&drv->driver);
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);
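
/*
 * Hypothetical client driver sketch (names invented, shape borrowed from
 * ntb_netdev): the driver plugs into ntb_transport_bus, and probe() runs
 * for each matching client device created above.
 */
static int ntb_demo_probe(struct device *client_dev)
{
        /* a real client would call ntb_transport_create_queue() here */
        return 0;
}

static void ntb_demo_remove(struct device *client_dev)
{
        /* free the queue with ntb_transport_free_queue() here */
}

static struct ntb_transport_client ntb_demo_client = {
        .driver.name = "ntb_demo",
        .driver.owner = THIS_MODULE,
        .probe = ntb_demo_probe,
        .remove = ntb_demo_remove,
};
/* registered via: ntb_transport_register_client(&ntb_demo_client); */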
static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
        struct ntb_transport_qp *qp;
        ssize_t ret, out_offset, out_count;

        qp = filp->private_data;

        if (!qp || !qp->link_is_up)

        buf = kmalloc(out_count, GFP_KERNEL);

        out_offset += snprintf(buf + out_offset, out_count - out_offset,
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_bytes - \t%llu\n", qp->rx_bytes);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_pkts - \t%llu\n", qp->rx_pkts);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_memcpy - \t%llu\n", qp->rx_memcpy);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_async - \t%llu\n", qp->rx_async);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_ring_empty - %llu\n", qp->rx_ring_empty);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_err_ver - \t%llu\n", qp->rx_err_ver);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_buff - \t%p\n", qp->rx_buff);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_index - \t%u\n", qp->rx_index);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_max_entry - \t%u\n", qp->rx_max_entry);

        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_bytes - \t%llu\n", qp->tx_bytes);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_pkts - \t%llu\n", qp->tx_pkts);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_memcpy - \t%llu\n", qp->tx_memcpy);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_async - \t%llu\n", qp->tx_async);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_ring_full - \t%llu\n", qp->tx_ring_full);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_mw - \t%p\n", qp->tx_mw);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_index - \t%u\n", qp->tx_index);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_max_entry - \t%u\n", qp->tx_max_entry);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "qp->remote_rx_info->entry - \t%u\n",
                               qp->remote_rx_info->entry);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               ntb_transport_tx_free_entry(qp));

        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               qp->link_is_up ? "Up" : "Down");
        if (out_offset > out_count)
                out_offset = out_count;

        ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);

static const struct file_operations ntb_qp_debugfs_stats = {
        .owner = THIS_MODULE,
        .read = debugfs_read,
static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
                         struct list_head *list)
        spin_lock_irqsave(lock, flags);
        list_add_tail(entry, list);
        spin_unlock_irqrestore(lock, flags);

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
                                           struct list_head *list)
        struct ntb_queue_entry *entry;

        spin_lock_irqsave(lock, flags);
        if (list_empty(list)) {

        entry = list_first_entry(list, struct ntb_queue_entry, entry);
        list_del(&entry->entry);

        spin_unlock_irqrestore(lock, flags);

static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
                                           struct list_head *list,
                                           struct list_head *to_list)
        struct ntb_queue_entry *entry;

        spin_lock_irqsave(lock, flags);

        if (list_empty(list)) {

        entry = list_first_entry(list, struct ntb_queue_entry, entry);
        list_move_tail(&entry->entry, to_list);

        spin_unlock_irqrestore(lock, flags);
static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
        struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
        struct ntb_transport_mw *mw;
        unsigned int rx_size, num_qps_mw;
        unsigned int mw_num, mw_count, qp_count;

        mw_count = nt->mw_count;
        qp_count = nt->qp_count;

        mw_num = QP_TO_MW(nt, qp_num);
        mw = &nt->mw_vec[mw_num];

        if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
                num_qps_mw = qp_count / mw_count + 1;
        else
                num_qps_mw = qp_count / mw_count;

        rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
        qp->rx_buff = mw->virt_addr + rx_size * qp_num / mw_count;
        rx_size -= sizeof(struct ntb_rx_info);

        qp->remote_rx_info = qp->rx_buff + rx_size;
        /* Due to housekeeping, there must be at least 2 buffs */
        qp->rx_max_frame = min(transport_mtu, rx_size / 2);
        qp->rx_max_entry = rx_size / qp->rx_max_frame;

        qp->remote_rx_info->entry = qp->rx_max_entry - 1;

        /* set up the hdr offsets with 0s */
        for (i = 0; i < qp->rx_max_entry; i++) {
                void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
                                sizeof(struct ntb_payload_header));
                memset(offset, 0, sizeof(struct ntb_payload_header));
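
/*
 * Illustrative layout (not from the original source): each rx frame ends
 * with its struct ntb_payload_header, and the final sizeof(struct
 * ntb_rx_info) bytes of the window hold remote_rx_info:
 *
 *   [ data ... | hdr ][ data ... | hdr ] ... [ ntb_rx_info ]
 */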
static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
        struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
        struct pci_dev *pdev = nt->ndev->pdev;

        ntb_mw_clear_trans(nt->ndev, num_mw);
        dma_free_coherent(&pdev->dev, mw->buff_size,
                          mw->virt_addr, mw->dma_addr);

        mw->virt_addr = NULL;

static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
                      resource_size_t size)
        struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
        struct pci_dev *pdev = nt->ndev->pdev;
        size_t xlat_size, buff_size;

        xlat_size = round_up(size, mw->xlat_align_size);
        buff_size = round_up(size, mw->xlat_align);

        /* No need to re-setup */
        if (mw->xlat_size == xlat_size)

        ntb_free_mw(nt, num_mw);
        /* Alloc memory for receiving data. Must be aligned */
        mw->xlat_size = xlat_size;
        mw->buff_size = buff_size;

        mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
                                           &mw->dma_addr, GFP_KERNEL);
        if (!mw->virt_addr) {
                dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
        /*
         * We must ensure that the memory address allocated is BAR size
         * aligned in order for the XLAT register to take the value. This
         * is a requirement of the hardware. It is recommended to set up CMA
         * for BAR sizes equal to or greater than 4MB.
         */
        if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
                dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
                ntb_free_mw(nt, num_mw);
        /* Notify HW the memory location of the receive buffer */
        rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
                dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
                ntb_free_mw(nt, num_mw);
static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
        qp->link_is_up = false;

        qp->rx_ring_empty = 0;
        qp->rx_err_no_buf = 0;
        qp->rx_err_oflow = 0;

        qp->tx_ring_full = 0;
        qp->tx_err_no_buf = 0;
static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
        struct ntb_transport_ctx *nt = qp->transport;
        struct pci_dev *pdev = nt->ndev->pdev;

        dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);

        cancel_delayed_work_sync(&qp->link_work);
        ntb_qp_link_down_reset(qp);

        if (qp->event_handler)
                qp->event_handler(qp->cb_data, qp->link_is_up);

static void ntb_qp_link_cleanup_work(struct work_struct *work)
        struct ntb_transport_qp *qp = container_of(work,
                                                   struct ntb_transport_qp,
        struct ntb_transport_ctx *nt = qp->transport;

        ntb_qp_link_cleanup(qp);

                schedule_delayed_work(&qp->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
        schedule_work(&qp->link_cleanup);
static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
        struct ntb_transport_qp *qp;

        qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

        /* Pass along the info to any clients */
        for (i = 0; i < nt->qp_count; i++)
                if (qp_bitmap_alloc & BIT_ULL(i)) {
                        ntb_qp_link_cleanup(qp);
                        cancel_work_sync(&qp->link_cleanup);
                        cancel_delayed_work_sync(&qp->link_work);

        cancel_delayed_work_sync(&nt->link_work);
        /* The scratchpad registers keep the values if the remote side
         * goes down; blast them now to give them a sane value the next
         * time they are accessed.
         */
        for (i = 0; i < MAX_SPAD; i++)
                ntb_spad_write(nt->ndev, i, 0);
static void ntb_transport_link_cleanup_work(struct work_struct *work)
        struct ntb_transport_ctx *nt =
                container_of(work, struct ntb_transport_ctx, link_cleanup);

        ntb_transport_link_cleanup(nt);

static void ntb_transport_event_callback(void *data)
        struct ntb_transport_ctx *nt = data;

        if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
                schedule_delayed_work(&nt->link_work, 0);
        else
                schedule_work(&nt->link_cleanup);
static void ntb_transport_link_work(struct work_struct *work)
        struct ntb_transport_ctx *nt =
                container_of(work, struct ntb_transport_ctx, link_work.work);
        struct ntb_dev *ndev = nt->ndev;
        struct pci_dev *pdev = ndev->pdev;
        resource_size_t size;

        /* send the local info, in the opposite order of the way we read it */
        for (i = 0; i < nt->mw_count; i++) {
                size = nt->mw_vec[i].phys_size;

                if (max_mw_size && size > max_mw_size)

                spad = MW0_SZ_HIGH + (i * 2);
                ntb_peer_spad_write(ndev, spad, (u32)(size >> 32));

                spad = MW0_SZ_LOW + (i * 2);
                ntb_peer_spad_write(ndev, spad, (u32)size);

        ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);

        ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);

        ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);
        /* Query the remote side for its info */
        val = ntb_spad_read(ndev, VERSION);
        dev_dbg(&pdev->dev, "Remote version = %d\n", val);
        if (val != NTB_TRANSPORT_VERSION)

        val = ntb_spad_read(ndev, NUM_QPS);
        dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
        if (val != nt->qp_count)

        val = ntb_spad_read(ndev, NUM_MWS);
        dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
        if (val != nt->mw_count)

        for (i = 0; i < nt->mw_count; i++) {
                val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
                val64 = (u64)val << 32;

                val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));

                dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);

                rc = ntb_set_mw(nt, i, val64);

        nt->link_is_up = true;
        for (i = 0; i < nt->qp_count; i++) {
                struct ntb_transport_qp *qp = &nt->qp_vec[i];

                ntb_transport_setup_qp_mw(nt, i);

                if (qp->client_ready)
                        schedule_delayed_work(&qp->link_work, 0);

        for (i = 0; i < nt->mw_count; i++)

        if (ntb_link_is_up(ndev, NULL, NULL) == 1)
                schedule_delayed_work(&nt->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
static void ntb_qp_link_work(struct work_struct *work)
        struct ntb_transport_qp *qp = container_of(work,
                                                   struct ntb_transport_qp,
        struct pci_dev *pdev = qp->ndev->pdev;
        struct ntb_transport_ctx *nt = qp->transport;

        WARN_ON(!nt->link_is_up);

        val = ntb_spad_read(nt->ndev, QP_LINKS);

        ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));

        /* query remote spad for qp ready bits */
        ntb_peer_spad_read(nt->ndev, QP_LINKS);
        dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);

        /* See if the remote side is up */
        if (val & BIT(qp->qp_num)) {
                dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
                qp->link_is_up = true;

                if (qp->event_handler)
                        qp->event_handler(qp->cb_data, qp->link_is_up);

                tasklet_schedule(&qp->rxc_db_work);
        } else if (nt->link_is_up)
                schedule_delayed_work(&qp->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
        struct ntb_transport_qp *qp;
        struct ntb_transport_mw *mw;
        resource_size_t mw_size;
        unsigned int num_qps_mw, tx_size;
        unsigned int mw_num, mw_count, qp_count;

        mw_count = nt->mw_count;
        qp_count = nt->qp_count;

        mw_num = QP_TO_MW(nt, qp_num);
        mw = &nt->mw_vec[mw_num];

        qp = &nt->qp_vec[qp_num];

        qp->client_ready = false;
        qp->event_handler = NULL;
        ntb_qp_link_down_reset(qp);
        if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
                num_qps_mw = qp_count / mw_count + 1;
        else
                num_qps_mw = qp_count / mw_count;
        mw_base = nt->mw_vec[mw_num].phys_addr;
        mw_size = nt->mw_vec[mw_num].phys_size;

        tx_size = (unsigned int)mw_size / num_qps_mw;
        qp_offset = tx_size * qp_num / mw_count;

        qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;

        qp->tx_mw_phys = mw_base + qp_offset;

        tx_size -= sizeof(struct ntb_rx_info);
        qp->rx_info = qp->tx_mw + tx_size;
        /* Due to housekeeping, there must be at least 2 buffs */
        qp->tx_max_frame = min(transport_mtu, tx_size / 2);
        qp->tx_max_entry = tx_size / qp->tx_max_frame;
        if (nt->debugfs_node_dir) {
                char debugfs_name[8];

                snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
                qp->debugfs_dir = debugfs_create_dir(debugfs_name,
                                                     nt->debugfs_node_dir);

                qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
                                                        &ntb_qp_debugfs_stats);
        } else {
                qp->debugfs_dir = NULL;
                qp->debugfs_stats = NULL;
        INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
        INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);

        spin_lock_init(&qp->ntb_rx_q_lock);
        spin_lock_init(&qp->ntb_tx_free_q_lock);

        INIT_LIST_HEAD(&qp->rx_post_q);
        INIT_LIST_HEAD(&qp->rx_pend_q);
        INIT_LIST_HEAD(&qp->rx_free_q);
        INIT_LIST_HEAD(&qp->tx_free_q);

        tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
        struct ntb_transport_ctx *nt;
        struct ntb_transport_mw *mw;
        unsigned int mw_count, qp_count;

        if (ntb_db_is_unsafe(ndev))
                        "doorbell is unsafe, proceed anyway...\n");
        if (ntb_spad_is_unsafe(ndev))
                        "scratchpad is unsafe, proceed anyway...\n");

        node = dev_to_node(&ndev->dev);

        nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);

        mw_count = ntb_mw_count(ndev);

        nt->mw_count = mw_count;

        nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),

        for (i = 0; i < mw_count; i++) {
                mw = &nt->mw_vec[i];

                rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
                                      &mw->xlat_align, &mw->xlat_align_size);

                mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);

                mw->virt_addr = NULL;

        qp_bitmap = ntb_db_valid_mask(ndev);

        qp_count = ilog2(qp_bitmap);
        if (max_num_clients && max_num_clients < qp_count)
                qp_count = max_num_clients;
        else if (mw_count < qp_count)
                qp_count = mw_count;

        qp_bitmap &= BIT_ULL(qp_count) - 1;

        nt->qp_count = qp_count;
        nt->qp_bitmap = qp_bitmap;
        nt->qp_bitmap_free = qp_bitmap;
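
        /*
         * Worked example (illustrative, not from the original source): a
         * 16-bit doorbell mask gives ilog2(0xffff) == 15 possible qps;
         * with mw_count == 2 and max_num_clients == 0 this is clamped to
         * 2, and qp_bitmap is masked down to 0x3.
         */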
        nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),

        if (nt_debugfs_dir) {
                nt->debugfs_node_dir =
                        debugfs_create_dir(pci_name(ndev->pdev),

        for (i = 0; i < qp_count; i++) {
                rc = ntb_transport_init_queue(nt, i);

        INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
        INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

        rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);

        INIT_LIST_HEAD(&nt->client_devs);
        rc = ntb_bus_init(nt);

        nt->link_is_up = false;
        ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
        ntb_link_event(ndev);

        ntb_clear_ctx(ndev);

                mw = &nt->mw_vec[i];
static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
        struct ntb_transport_ctx *nt = ndev->ctx;
        struct ntb_transport_qp *qp;
        u64 qp_bitmap_alloc;

        ntb_transport_link_cleanup(nt);
        cancel_work_sync(&nt->link_cleanup);
        cancel_delayed_work_sync(&nt->link_work);

        qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

        /* verify that all the qps are freed */
        for (i = 0; i < nt->qp_count; i++) {
                qp = &nt->qp_vec[i];
                if (qp_bitmap_alloc & BIT_ULL(i))
                        ntb_transport_free_queue(qp);
                debugfs_remove_recursive(qp->debugfs_dir);

        ntb_link_disable(ndev);
        ntb_clear_ctx(ndev);

        for (i = nt->mw_count; i--; ) {
                iounmap(nt->mw_vec[i].vbase);
static void ntb_complete_rxc(struct ntb_transport_qp *qp)
        struct ntb_queue_entry *entry;
        unsigned long irqflags;

        spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);

        while (!list_empty(&qp->rx_post_q)) {
                entry = list_first_entry(&qp->rx_post_q,
                                         struct ntb_queue_entry, entry);
                if (!(entry->flags & DESC_DONE_FLAG))

                entry->rx_hdr->flags = 0;
                iowrite32(entry->index, &qp->rx_info->entry);

                cb_data = entry->cb_data;

                list_move_tail(&entry->entry, &qp->rx_free_q);

                spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);

                if (qp->rx_handler && qp->client_ready)
                        qp->rx_handler(qp, qp->cb_data, cb_data, len);

                spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);

        spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
static void ntb_rx_copy_callback(void *data)
        struct ntb_queue_entry *entry = data;

        entry->flags |= DESC_DONE_FLAG;

        ntb_complete_rxc(entry->qp);

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
        void *buf = entry->buf;
        size_t len = entry->len;

        memcpy(buf, offset, len);

        /* Ensure that the data is fully copied out before clearing the flag */

        ntb_rx_copy_callback(entry);
static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
        struct dma_async_tx_descriptor *txd;
        struct ntb_transport_qp *qp = entry->qp;
        struct dma_chan *chan = qp->dma_chan;
        struct dma_device *device;
        size_t pay_off, buff_off, len;
        struct dmaengine_unmap_data *unmap;
        dma_cookie_t cookie;
        void *buf = entry->buf;

        if (len < copy_bytes)

        device = chan->device;
        pay_off = (size_t)offset & ~PAGE_MASK;
        buff_off = (size_t)buf & ~PAGE_MASK;

        if (!is_dma_copy_aligned(device, pay_off, buff_off, len))

        unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);

        unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
                                      pay_off, len, DMA_TO_DEVICE);
        if (dma_mapping_error(device->dev, unmap->addr[0]))

        unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
                                      buff_off, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(device->dev, unmap->addr[1]))

        unmap->from_cnt = 1;

        txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
                                             unmap->addr[0], len,
                                             DMA_PREP_INTERRUPT);

        txd->callback = ntb_rx_copy_callback;
        txd->callback_param = entry;
        dma_set_unmap(txd, unmap);

        cookie = dmaengine_submit(txd);
        if (dma_submit_error(cookie))

        dmaengine_unmap_put(unmap);

        qp->last_cookie = cookie;

        dmaengine_unmap_put(unmap);

        dmaengine_unmap_put(unmap);

        /* If the callbacks come out of order, the writing of the index to the
         * last completed will be out of order. This may result in the
         * receive stalling forever.
         */
        dma_sync_wait(chan, qp->last_cookie);

        ntb_memcpy_rx(entry, offset);
static int ntb_process_rxc(struct ntb_transport_qp *qp)
        struct ntb_payload_header *hdr;
        struct ntb_queue_entry *entry;

        offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
        hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

        dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
                qp->qp_num, hdr->ver, hdr->len, hdr->flags);

        if (!(hdr->flags & DESC_DONE_FLAG)) {
                dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
                qp->rx_ring_empty++;

        if (hdr->flags & LINK_DOWN_FLAG) {
                dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
                ntb_qp_link_down(qp);

        if (hdr->ver != (u32)qp->rx_pkts) {
                dev_dbg(&qp->ndev->pdev->dev,
                        "version mismatch, expected %llu - got %u\n",
                        qp->rx_pkts, hdr->ver);

        entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
                dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
                qp->rx_err_no_buf++;

        entry->rx_hdr = hdr;
        entry->index = qp->rx_index;

        if (hdr->len > entry->len) {
                dev_dbg(&qp->ndev->pdev->dev,
                        "receive buffer overflow! Wanted %d got %d\n",
                        hdr->len, entry->len);

                entry->flags |= DESC_DONE_FLAG;

                ntb_complete_rxc(qp);

        dev_dbg(&qp->ndev->pdev->dev,
                "RX OK index %u ver %u size %d into buf size %d\n",
                qp->rx_index, hdr->ver, hdr->len, entry->len);

        qp->rx_bytes += hdr->len;

        entry->len = hdr->len;

        ntb_async_rx(entry, offset);

        qp->rx_index %= qp->rx_max_entry;
static void ntb_transport_rxc_db(unsigned long data)
        struct ntb_transport_qp *qp = (void *)data;

        dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
                __func__, qp->qp_num);

        /* Limit the number of packets processed in a single interrupt to
         * provide fairness to others
         */
        for (i = 0; i < qp->rx_max_entry; i++) {
                rc = ntb_process_rxc(qp);

        if (i && qp->dma_chan)
                dma_async_issue_pending(qp->dma_chan);

        if (i == qp->rx_max_entry) {
                /* there is more work to do */
                tasklet_schedule(&qp->rxc_db_work);
        } else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
                /* the doorbell bit is set: clear it */
                ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
                /* ntb_db_read ensures ntb_db_clear write is committed */
                ntb_db_read(qp->ndev);

                /* an interrupt may have arrived between finishing
                 * ntb_process_rxc and clearing the doorbell bit:
                 * there might be some more work to do.
                 */
                tasklet_schedule(&qp->rxc_db_work);
static void ntb_tx_copy_callback(void *data)
        struct ntb_queue_entry *entry = data;
        struct ntb_transport_qp *qp = entry->qp;
        struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

        iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

        ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));

        /* The entry length can only be zero if the packet is intended to be a
         * "link down" or similar. Since no payload is being sent in these
         * cases, there is nothing to add to the completion queue.
         */
        if (entry->len > 0) {
                qp->tx_bytes += entry->len;

                        qp->tx_handler(qp, qp->cb_data, entry->cb_data,

        ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
#ifdef ARCH_HAS_NOCACHE_UACCESS
        /*
         * Using non-temporal mov to improve performance on non-cached
         * writes, even though we aren't actually copying from user space.
         */
        __copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
#else
        memcpy_toio(offset, entry->buf, entry->len);
#endif

        /* Ensure that the data is fully copied out before setting the flags */

        ntb_tx_copy_callback(entry);
static void ntb_async_tx(struct ntb_transport_qp *qp,
                         struct ntb_queue_entry *entry)
        struct ntb_payload_header __iomem *hdr;
        struct dma_async_tx_descriptor *txd;
        struct dma_chan *chan = qp->dma_chan;
        struct dma_device *device;
        size_t dest_off, buff_off;
        struct dmaengine_unmap_data *unmap;
        dma_cookie_t cookie;
        void __iomem *offset;
        size_t len = entry->len;
        void *buf = entry->buf;

        offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
        hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
        entry->tx_hdr = hdr;

        iowrite32(entry->len, &hdr->len);
        iowrite32((u32)qp->tx_pkts, &hdr->ver);

        if (len < copy_bytes)

        device = chan->device;
        dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
        buff_off = (size_t)buf & ~PAGE_MASK;
        dest_off = (size_t)dest & ~PAGE_MASK;

        if (!is_dma_copy_aligned(device, buff_off, dest_off, len))

        unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);

        unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
                                      buff_off, len, DMA_TO_DEVICE);
        if (dma_mapping_error(device->dev, unmap->addr[0]))

        txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
                                             DMA_PREP_INTERRUPT);

        txd->callback = ntb_tx_copy_callback;
        txd->callback_param = entry;
        dma_set_unmap(txd, unmap);

        cookie = dmaengine_submit(txd);
        if (dma_submit_error(cookie))

        dmaengine_unmap_put(unmap);

        dma_async_issue_pending(chan);

        dmaengine_unmap_put(unmap);

        dmaengine_unmap_put(unmap);

        ntb_memcpy_tx(entry, offset);
static int ntb_process_tx(struct ntb_transport_qp *qp,
                          struct ntb_queue_entry *entry)
        if (qp->tx_index == qp->remote_rx_info->entry) {

        if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
                        qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

                ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,

        ntb_async_tx(qp, entry);

        qp->tx_index %= qp->tx_max_entry;
static void ntb_send_link_down(struct ntb_transport_qp *qp)
        struct pci_dev *pdev = qp->ndev->pdev;
        struct ntb_queue_entry *entry;

        if (!qp->link_is_up)

        dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);

        for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
                entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);

        entry->cb_data = NULL;
        entry->flags = LINK_DOWN_FLAG;

        rc = ntb_process_tx(qp, entry);
                dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",

        ntb_qp_link_down_reset(qp);

static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
        return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @rx_handler: receive callback function
 * @tx_handler: transmit callback function
 * @event_handler: event callback function
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive. The receive callback routine will be
 * used to pass up data when the transport has received it on the queue. The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct device *client_dev,
                           const struct ntb_queue_handlers *handlers)
        struct ntb_dev *ndev;
        struct pci_dev *pdev;
        struct ntb_transport_ctx *nt;
        struct ntb_queue_entry *entry;
        struct ntb_transport_qp *qp;
        unsigned int free_queue;
        dma_cap_mask_t dma_mask;

        ndev = dev_ntb(client_dev->parent);

        node = dev_to_node(&ndev->dev);

        free_queue = ffs(nt->qp_bitmap_free);

        /* decrement free_queue to make it zero based */

        qp = &nt->qp_vec[free_queue];
        qp_bit = BIT_ULL(qp->qp_num);

        nt->qp_bitmap_free &= ~qp_bit;

        qp->rx_handler = handlers->rx_handler;
        qp->tx_handler = handlers->tx_handler;
        qp->event_handler = handlers->event_handler;

        dma_cap_zero(dma_mask);
        dma_cap_set(DMA_MEMCPY, dma_mask);

        qp->dma_chan = dma_request_channel(dma_mask, ntb_dma_filter_fn,
                                           (void *)(unsigned long)node);
                dev_info(&pdev->dev, "Unable to allocate DMA channel\n");

                qp->dma_chan = NULL;

        dev_dbg(&pdev->dev, "Using %s memcpy\n", qp->dma_chan ? "DMA" : "CPU");

        for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
                entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);

                ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,

        for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
                entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);

                ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,

        ntb_db_clear(qp->ndev, qp_bit);
        ntb_db_clear_mask(qp->ndev, qp_bit);

        dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

        while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))

        while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))

        dma_release_channel(qp->dma_chan);
        nt->qp_bitmap_free |= qp_bit;
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
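
/*
 * Hypothetical call-site sketch (names invented): the three callbacks are
 * handed over in a struct ntb_queue_handlers, as ntb_netdev does.
 */
static void demo_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
                            void *data, int len)
{
        /* consume the filled buffer, then repost it with
         * ntb_transport_rx_enqueue()
         */
}

static void demo_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
                            void *data, int len)
{
        /* the transmit buffer may be freed or reused from here on */
}

static void demo_event_handler(void *data, int status)
{
        /* status carries the new link state: nonzero up, zero down */
}

static const struct ntb_queue_handlers demo_handlers = {
        .rx_handler = demo_rx_handler,
        .tx_handler = demo_tx_handler,
        .event_handler = demo_event_handler,
};
/* qp = ntb_transport_create_queue(priv, client_dev, &demo_handlers); */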
/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
        struct pci_dev *pdev;
        struct ntb_queue_entry *entry;

        pdev = qp->ndev->pdev;

        struct dma_chan *chan = qp->dma_chan;
        /* Putting the dma_chan to NULL will force any new traffic to be
         * processed by the CPU instead of the DMA engine
         */
        qp->dma_chan = NULL;

        /* Try to be nice and wait for any queued DMA engine
         * transactions to process before smashing it with a rock
         */
        dma_sync_wait(chan, qp->last_cookie);
        dmaengine_terminate_all(chan);
        dma_release_channel(chan);

        qp_bit = BIT_ULL(qp->qp_num);

        ntb_db_set_mask(qp->ndev, qp_bit);
        tasklet_disable(&qp->rxc_db_work);

        cancel_delayed_work_sync(&qp->link_work);

        qp->rx_handler = NULL;
        qp->tx_handler = NULL;
        qp->event_handler = NULL;

        while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))

        while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
                dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");

        while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
                dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");

        while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))

        qp->transport->qp_bitmap_free |= qp_bit;

        dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB queue to be freed
 * @len: pointer to variable to write enqueued buffers length
 *
 * Dequeues unused buffers from receive queue. Should only be used during
 * shutdown of qp.
 *
 * RETURNS: NULL error value on error, or void* for success.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
        struct ntb_queue_entry *entry;

        if (!qp || qp->client_ready)

        entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);

        buf = entry->cb_data;

        ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue, into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
        struct ntb_queue_entry *entry;

        entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);

        entry->cb_data = cb;

        ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);

        tasklet_schedule(&qp->rxc_db_work);
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
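
/*
 * Illustrative pre-posting loop (assumed buffer management, invented
 * helper name): clients normally post a pool of receive buffers before
 * raising the link; the cb pointer comes back as "data" in rx_handler.
 */
static int demo_post_rx_bufs(struct ntb_transport_qp *qp, unsigned int n)
{
        unsigned int len = ntb_transport_max_size(qp);
        unsigned int i;

        for (i = 0; i < n; i++) {
                void *buf = kmalloc(len, GFP_KERNEL);
                int rc;

                if (!buf)
                        return -ENOMEM;
                rc = ntb_transport_rx_enqueue(qp, buf, buf, len);
                if (rc) {
                        kfree(buf);
                        return rc;
                }
        }
        return 0;
}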
/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted. This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
        struct ntb_queue_entry *entry;

        if (!qp || !qp->link_is_up || !len)

        entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
                qp->tx_err_no_buf++;

        entry->cb_data = cb;

        rc = ntb_process_tx(qp, entry);
                ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
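
/*
 * Illustrative transmit wrapper (invented helper name): the buffer must
 * stay valid until tx_handler reports completion; an error return means
 * the entry was not queued (e.g. ring full or no free entry, counted in
 * tx_ring_full/tx_err_no_buf) and the caller should back off and retry.
 */
static int demo_send(struct ntb_transport_qp *qp, void *buf, int len)
{
        if (len > ntb_transport_max_size(qp))
                return -EMSGSIZE;

        return ntb_transport_tx_enqueue(qp, buf, buf, len);
}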
/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
        qp->client_ready = true;

        if (qp->transport->link_is_up)
                schedule_delayed_work(&qp->link_work, 0);
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified. It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
        struct pci_dev *pdev;

        pdev = qp->ndev->pdev;
        qp->client_ready = false;

        val = ntb_spad_read(qp->ndev, QP_LINKS);

        ntb_peer_spad_write(qp->ndev, QP_LINKS,
                            val & ~BIT(qp->qp_num));

        ntb_send_link_down(qp);

        cancel_delayed_work_sync(&qp->link_work);
EXPORT_SYMBOL_GPL(ntb_transport_link_down);
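
/*
 * Illustrative lifecycle (assumed ordering, mirroring ntb_netdev): post
 * receive buffers first, then announce readiness; teardown reverses the
 * order and reclaims unused buffers before freeing the queue.
 */
static void demo_start(struct ntb_transport_qp *qp)
{
        /* demo_post_rx_bufs(qp, NTB_QP_DEF_NUM_ENTRIES); */
        ntb_transport_link_up(qp);
}

static void demo_stop(struct ntb_transport_qp *qp)
{
        unsigned int len;
        void *buf;

        ntb_transport_link_down(qp);    /* sends LINK_DOWN_FLAG to the peer */

        /* reclaim buffers that were posted but never filled */
        while ((buf = ntb_transport_rx_remove(qp, &len)))
                kfree(buf);

        ntb_transport_free_queue(qp);
}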
/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
        return qp->link_is_up;
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
                return qp->tx_max_frame - sizeof(struct ntb_payload_header);

        /* If DMA engine usage is possible, try to find the max size for that */
        max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
        max -= max % (1 << qp->dma_chan->device->copy_align);
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
        unsigned int head = qp->tx_index;
        unsigned int tail = qp->remote_rx_info->entry;

        return tail > head ? tail - head : qp->tx_max_entry + tail - head;
EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
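
/*
 * Worked example (illustrative, not from the original source): with
 * tx_max_entry == 8, head == 6 and tail == 2 the ring has wrapped, so
 * 8 + 2 - 6 == 4 entries are free.
 */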
static void ntb_transport_doorbell_callback(void *data, int vector)
        struct ntb_transport_ctx *nt = data;
        struct ntb_transport_qp *qp;
        unsigned int qp_num;

        db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
                   ntb_db_vector_mask(nt->ndev, vector));

                qp_num = __ffs(db_bits);
                qp = &nt->qp_vec[qp_num];

                tasklet_schedule(&qp->rxc_db_work);

                db_bits &= ~BIT_ULL(qp_num);

static const struct ntb_ctx_ops ntb_transport_ops = {
        .link_event = ntb_transport_event_callback,
        .db_event = ntb_transport_doorbell_callback,

static struct ntb_client ntb_transport_client = {
        .probe = ntb_transport_probe,
        .remove = ntb_transport_free,
static int __init ntb_transport_init(void)
        pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);

        if (debugfs_initialized())
                nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

        rc = bus_register(&ntb_transport_bus);

        rc = ntb_register_client(&ntb_transport_client);

        bus_unregister(&ntb_transport_bus);

        debugfs_remove_recursive(nt_debugfs_dir);

module_init(ntb_transport_init);

static void __exit ntb_transport_exit(void)
        debugfs_remove_recursive(nt_debugfs_dir);

        ntb_unregister_client(&ntb_transport_client);
        bus_unregister(&ntb_transport_bus);
module_exit(ntb_transport_exit);