diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 9faf1c6029af771bedf8bd9216819a75404e3891..1c6386d5f79c742737e4ee1a8a2b99df686ffaa0 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -58,6 +58,7 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/types.h>
+#include <linux/uaccess.h>
 #include "linux/ntb.h"
 #include "linux/ntb_transport.h"
 
@@ -75,7 +76,7 @@ static unsigned long max_mw_size;
 module_param(max_mw_size, ulong, 0644);
 MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
 
-static unsigned int transport_mtu = 0x401E;
+static unsigned int transport_mtu = 0x10000;
 module_param(transport_mtu, uint, 0644);
 MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
 
@@ -87,6 +88,10 @@ static unsigned int copy_bytes = 1024;
 module_param(copy_bytes, uint, 0644);
 MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
 
+static bool use_dma;
+module_param(use_dma, bool, 0644);
+MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");
+
 static struct dentry *nt_debugfs_dir;
 
 struct ntb_queue_entry {
@@ -137,10 +142,11 @@ struct ntb_transport_qp {
 
        void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
                           void *data, int len);
+       struct list_head rx_post_q;
        struct list_head rx_pend_q;
        struct list_head rx_free_q;
-       spinlock_t ntb_rx_pend_q_lock;
-       spinlock_t ntb_rx_free_q_lock;
+       /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
+       spinlock_t ntb_rx_q_lock;
        void *rx_buff;
        unsigned int rx_index;
        unsigned int rx_max_entry;
@@ -206,6 +212,8 @@ struct ntb_transport_ctx {
        bool link_is_up;
        struct delayed_work link_work;
        struct work_struct link_cleanup;
+
+       struct dentry *debugfs_node_dir;
 };
 
 enum {
@@ -346,6 +354,7 @@ int ntb_transport_register_client_dev(char *device_name)
 {
        struct ntb_transport_client_dev *client_dev;
        struct ntb_transport_ctx *nt;
+       int node;
        int rc, i = 0;
 
        if (list_empty(&ntb_transport_list))
@@ -354,8 +363,10 @@ int ntb_transport_register_client_dev(char *device_name)
        list_for_each_entry(nt, &ntb_transport_list, entry) {
                struct device *dev;
 
-               client_dev = kzalloc(sizeof(*client_dev),
-                                    GFP_KERNEL);
+               node = dev_to_node(&nt->ndev->dev);
+
+               client_dev = kzalloc_node(sizeof(*client_dev),
+                                         GFP_KERNEL, node);
                if (!client_dev) {
                        rc = -ENOMEM;
                        goto err;
@@ -428,13 +439,17 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
        char *buf;
        ssize_t ret, out_offset, out_count;
 
+       qp = filp->private_data;
+
+       if (!qp || !qp->link_is_up)
+               return 0;
+
        out_count = 1000;
 
        buf = kmalloc(out_count, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
 
-       qp = filp->private_data;
        out_offset = 0;
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "NTB QP stats\n");
@@ -526,6 +541,27 @@ out:
        return entry;
 }
 
+static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
+                                          struct list_head *list,
+                                          struct list_head *to_list)
+{
+       struct ntb_queue_entry *entry;
+       unsigned long flags;
+
+       spin_lock_irqsave(lock, flags);
+
+       if (list_empty(list)) {
+               entry = NULL;
+       } else {
+               entry = list_first_entry(list, struct ntb_queue_entry, entry);
+               list_move_tail(&entry->entry, to_list);
+       }
+
+       spin_unlock_irqrestore(lock, flags);
+
+       return entry;
+}
+
 static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
                                     unsigned int qp_num)
 {
@@ -593,13 +629,16 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
 }
 
 static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
-                     unsigned int size)
+                     resource_size_t size)
 {
        struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
        struct pci_dev *pdev = nt->ndev->pdev;
-       unsigned int xlat_size, buff_size;
+       size_t xlat_size, buff_size;
        int rc;
 
+       if (!size)
+               return -EINVAL;
+
        xlat_size = round_up(size, mw->xlat_align_size);
        buff_size = round_up(size, mw->xlat_align);
 
@@ -619,7 +658,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
        if (!mw->virt_addr) {
                mw->xlat_size = 0;
                mw->buff_size = 0;
-               dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
+               dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
                        buff_size);
                return -ENOMEM;
        }
@@ -648,18 +687,37 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
        return 0;
 }
 
+static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
+{
+       qp->link_is_up = false;
+
+       qp->tx_index = 0;
+       qp->rx_index = 0;
+       qp->rx_bytes = 0;
+       qp->rx_pkts = 0;
+       qp->rx_ring_empty = 0;
+       qp->rx_err_no_buf = 0;
+       qp->rx_err_oflow = 0;
+       qp->rx_err_ver = 0;
+       qp->rx_memcpy = 0;
+       qp->rx_async = 0;
+       qp->tx_bytes = 0;
+       qp->tx_pkts = 0;
+       qp->tx_ring_full = 0;
+       qp->tx_err_no_buf = 0;
+       qp->tx_memcpy = 0;
+       qp->tx_async = 0;
+}
+
 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
 {
        struct ntb_transport_ctx *nt = qp->transport;
        struct pci_dev *pdev = nt->ndev->pdev;
 
-       if (qp->link_is_up) {
-               cancel_delayed_work_sync(&qp->link_work);
-               return;
-       }
+       dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);
 
-       dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
-       qp->link_is_up = false;
+       cancel_delayed_work_sync(&qp->link_work);
+       ntb_qp_link_down_reset(qp);
 
        if (qp->event_handler)
                qp->event_handler(qp->cb_data, qp->link_is_up);
@@ -761,17 +819,17 @@ static void ntb_transport_link_work(struct work_struct *work)
        ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);
 
        /* Query the remote side for its info */
-       val = ntb_peer_spad_read(ndev, VERSION);
+       val = ntb_spad_read(ndev, VERSION);
        dev_dbg(&pdev->dev, "Remote version = %d\n", val);
        if (val != NTB_TRANSPORT_VERSION)
                goto out;
 
-       val = ntb_peer_spad_read(ndev, NUM_QPS);
+       val = ntb_spad_read(ndev, NUM_QPS);
        dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
        if (val != nt->qp_count)
                goto out;
 
-       val = ntb_peer_spad_read(ndev, NUM_MWS);
+       val = ntb_spad_read(ndev, NUM_MWS);
        dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
        if (val != nt->mw_count)
                goto out;
@@ -779,10 +837,10 @@ static void ntb_transport_link_work(struct work_struct *work)
        for (i = 0; i < nt->mw_count; i++) {
                u64 val64;
 
-               val = ntb_peer_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
+               val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
                val64 = (u64)val << 32;
 
-               val = ntb_peer_spad_read(ndev, MW0_SZ_LOW + (i * 2));
+               val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
                val64 |= val;
 
                dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);
@@ -831,7 +889,7 @@ static void ntb_qp_link_work(struct work_struct *work)
 
        /* query remote spad for qp ready bits */
        ntb_peer_spad_read(nt->ndev, QP_LINKS);
-       dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);
+       dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);
 
        /* See if the remote side is up */
        if (val & BIT(qp->qp_num)) {
@@ -840,6 +898,8 @@ static void ntb_qp_link_work(struct work_struct *work)
 
                if (qp->event_handler)
                        qp->event_handler(qp->cb_data, qp->link_is_up);
+
+               tasklet_schedule(&qp->rxc_db_work);
        } else if (nt->link_is_up)
                schedule_delayed_work(&qp->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
@@ -866,9 +926,9 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
        qp->qp_num = qp_num;
        qp->transport = nt;
        qp->ndev = nt->ndev;
-       qp->link_is_up = false;
        qp->client_ready = false;
        qp->event_handler = NULL;
+       ntb_qp_link_down_reset(qp);
 
        if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
                num_qps_mw = qp_count / mw_count + 1;
@@ -896,12 +956,12 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
        qp->tx_max_frame = min(transport_mtu, tx_size / 2);
        qp->tx_max_entry = tx_size / qp->tx_max_frame;
 
-       if (nt_debugfs_dir) {
+       if (nt->debugfs_node_dir) {
                char debugfs_name[4];
 
                snprintf(debugfs_name, 4, "qp%d", qp_num);
                qp->debugfs_dir = debugfs_create_dir(debugfs_name,
-                                                    nt_debugfs_dir);
+                                                    nt->debugfs_node_dir);
 
                qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
                                                        qp->debugfs_dir, qp,
@@ -914,10 +974,10 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
        INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
        INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
 
-       spin_lock_init(&qp->ntb_rx_pend_q_lock);
-       spin_lock_init(&qp->ntb_rx_free_q_lock);
+       spin_lock_init(&qp->ntb_rx_q_lock);
        spin_lock_init(&qp->ntb_tx_free_q_lock);
 
+       INIT_LIST_HEAD(&qp->rx_post_q);
        INIT_LIST_HEAD(&qp->rx_pend_q);
        INIT_LIST_HEAD(&qp->rx_free_q);
        INIT_LIST_HEAD(&qp->tx_free_q);
@@ -934,6 +994,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
        struct ntb_transport_mw *mw;
        unsigned int mw_count, qp_count;
        u64 qp_bitmap;
+       int node;
        int rc, i;
 
        if (ntb_db_is_unsafe(ndev))
@@ -943,7 +1004,9 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
                dev_dbg(&ndev->dev,
                        "scratchpad is unsafe, proceed anyway...\n");
 
-       nt = kzalloc(sizeof(*nt), GFP_KERNEL);
+       node = dev_to_node(&ndev->dev);
+
+       nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
        if (!nt)
                return -ENOMEM;
 
@@ -953,7 +1016,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 
        nt->mw_count = mw_count;
 
-       nt->mw_vec = kcalloc(mw_count, sizeof(*nt->mw_vec), GFP_KERNEL);
+       nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
+                                 GFP_KERNEL, node);
        if (!nt->mw_vec) {
                rc = -ENOMEM;
                goto err;
@@ -967,7 +1031,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
                if (rc)
                        goto err1;
 
-               mw->vbase = ioremap(mw->phys_addr, mw->phys_size);
+               mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
                if (!mw->vbase) {
                        rc = -ENOMEM;
                        goto err1;
@@ -993,12 +1057,19 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
        nt->qp_bitmap = qp_bitmap;
        nt->qp_bitmap_free = qp_bitmap;
 
-       nt->qp_vec = kcalloc(qp_count, sizeof(*nt->qp_vec), GFP_KERNEL);
+       nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
+                                 GFP_KERNEL, node);
        if (!nt->qp_vec) {
                rc = -ENOMEM;
                goto err2;
        }
 
+       if (nt_debugfs_dir) {
+               nt->debugfs_node_dir =
+                       debugfs_create_dir(pci_name(ndev->pdev),
+                                          nt_debugfs_dir);
+       }
+
        for (i = 0; i < qp_count; i++) {
                rc = ntb_transport_init_queue(nt, i);
                if (rc)
@@ -1075,22 +1146,47 @@ static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
        kfree(nt);
 }
 
-static void ntb_rx_copy_callback(void *data)
+static void ntb_complete_rxc(struct ntb_transport_qp *qp)
 {
-       struct ntb_queue_entry *entry = data;
-       struct ntb_transport_qp *qp = entry->qp;
-       void *cb_data = entry->cb_data;
-       unsigned int len = entry->len;
-       struct ntb_payload_header *hdr = entry->rx_hdr;
+       struct ntb_queue_entry *entry;
+       void *cb_data;
+       unsigned int len;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
 
-       hdr->flags = 0;
+       while (!list_empty(&qp->rx_post_q)) {
+               entry = list_first_entry(&qp->rx_post_q,
+                                        struct ntb_queue_entry, entry);
+               if (!(entry->flags & DESC_DONE_FLAG))
+                       break;
 
-       iowrite32(entry->index, &qp->rx_info->entry);
+               entry->rx_hdr->flags = 0;
+               iowrite32(entry->index, &qp->rx_info->entry);
 
-       ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+               cb_data = entry->cb_data;
+               len = entry->len;
 
-       if (qp->rx_handler && qp->client_ready)
-               qp->rx_handler(qp, qp->cb_data, cb_data, len);
+               list_move_tail(&entry->entry, &qp->rx_free_q);
+
+               spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
+
+               if (qp->rx_handler && qp->client_ready)
+                       qp->rx_handler(qp, qp->cb_data, cb_data, len);
+
+               spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+       }
+
+       spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
+}
+
+static void ntb_rx_copy_callback(void *data)
+{
+       struct ntb_queue_entry *entry = data;
+
+       entry->flags |= DESC_DONE_FLAG;
+
+       ntb_complete_rxc(entry->qp);
 }
 
 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
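
The rewrite above decouples copy completion from buffer return: a completion callback only sets DESC_DONE_FLAG on its own entry, and ntb_complete_rxc() drains rx_post_q from the head, so buffers go back to the peer strictly in posted order even when DMA copies finish out of order. A self-contained sketch (not part of the patch) of that drain rule, assuming a fixed-size ring; all names are illustrative, not the driver's.

#include <stdio.h>

#define NDESC 4
#define DESC_DONE_FLAG (1 << 0)

struct desc {
	unsigned int flags;
	int index;
};

static struct desc post_q[NDESC];	/* entries in posted (ring) order */
static int head;			/* next entry owed back to the peer */

/* completion callback: may run for any index, in any order */
static void complete_one(int index)
{
	post_q[index].flags |= DESC_DONE_FLAG;

	/* release finished entries, but only in posted order */
	while (head < NDESC && (post_q[head].flags & DESC_DONE_FLAG)) {
		printf("returning rx entry %d to the peer\n",
		       post_q[head].index);
		head++;
	}
}

int main(void)
{
	for (int i = 0; i < NDESC; i++)
		post_q[i].index = i;

	complete_one(2);	/* out of order: nothing returned yet */
	complete_one(0);	/* returns 0, then stops at 1 */
	complete_one(1);	/* returns 1 and the already-done 2 */
	complete_one(3);	/* returns 3 */
	return 0;
}
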
@@ -1106,19 +1202,18 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
        ntb_rx_copy_callback(entry);
 }
 
-static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
-                        size_t len)
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 {
        struct dma_async_tx_descriptor *txd;
        struct ntb_transport_qp *qp = entry->qp;
        struct dma_chan *chan = qp->dma_chan;
        struct dma_device *device;
-       size_t pay_off, buff_off;
+       size_t pay_off, buff_off, len;
        struct dmaengine_unmap_data *unmap;
        dma_cookie_t cookie;
        void *buf = entry->buf;
 
-       entry->len = len;
+       len = entry->len;
 
        if (!chan)
                goto err;
@@ -1194,7 +1289,6 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
        struct ntb_payload_header *hdr;
        struct ntb_queue_entry *entry;
        void *offset;
-       int rc;
 
        offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
        hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
@@ -1212,8 +1306,7 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
                dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
                ntb_qp_link_down(qp);
                hdr->flags = 0;
-               iowrite32(qp->rx_index, &qp->rx_info->entry);
-               return 0;
+               return -EAGAIN;
        }
 
        if (hdr->ver != (u32)qp->rx_pkts) {
@@ -1224,65 +1317,43 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
                return -EIO;
        }
 
-       entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+       entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
        if (!entry) {
                dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
                qp->rx_err_no_buf++;
-
-               rc = -ENOMEM;
-               goto err;
+               return -EAGAIN;
        }
 
+       entry->rx_hdr = hdr;
+       entry->index = qp->rx_index;
+
        if (hdr->len > entry->len) {
                dev_dbg(&qp->ndev->pdev->dev,
                        "receive buffer overflow! Wanted %d got %d\n",
                        hdr->len, entry->len);
                qp->rx_err_oflow++;
 
-               rc = -EIO;
-               goto err;
-       }
+               entry->len = -EIO;
+               entry->flags |= DESC_DONE_FLAG;
 
-       dev_dbg(&qp->ndev->pdev->dev,
-               "RX OK index %u ver %u size %d into buf size %d\n",
-               qp->rx_index, hdr->ver, hdr->len, entry->len);
+               ntb_complete_rxc(qp);
+       } else {
+               dev_dbg(&qp->ndev->pdev->dev,
+                       "RX OK index %u ver %u size %d into buf size %d\n",
+                       qp->rx_index, hdr->ver, hdr->len, entry->len);
 
-       qp->rx_bytes += hdr->len;
-       qp->rx_pkts++;
+               qp->rx_bytes += hdr->len;
+               qp->rx_pkts++;
 
-       entry->index = qp->rx_index;
-       entry->rx_hdr = hdr;
+               entry->len = hdr->len;
 
-       ntb_async_rx(entry, offset, hdr->len);
+               ntb_async_rx(entry, offset);
+       }
 
        qp->rx_index++;
        qp->rx_index %= qp->rx_max_entry;
 
        return 0;
-
-err:
-       /* FIXME: if this syncrhonous update of the rx_index gets ahead of
-        * asyncrhonous ntb_rx_copy_callback of previous entry, there are three
-        * scenarios:
-        *
-        * 1) The peer might miss this update, but observe the update
-        * from the memcpy completion callback.  In this case, the buffer will
-        * not be freed on the peer to be reused for a different packet.  The
-        * successful rx of a later packet would clear the condition, but the
-        * condition could persist if several rx fail in a row.
-        *
-        * 2) The peer may observe this update before the asyncrhonous copy of
-        * prior packets is completed.  The peer may overwrite the buffers of
-        * the prior packets before they are copied.
-        *
-        * 3) Both: the peer may observe the update, and then observe the index
-        * decrement by the asynchronous completion callback.  Who knows what
-        * badness that will cause.
-        */
-       hdr->flags = 0;
-       iowrite32(qp->rx_index, &qp->rx_info->entry);
-
-       return rc;
 }
 
 static void ntb_transport_rxc_db(unsigned long data)
@@ -1302,7 +1373,7 @@ static void ntb_transport_rxc_db(unsigned long data)
                        break;
        }
 
-       if (qp->dma_chan)
+       if (i && qp->dma_chan)
                dma_async_issue_pending(qp->dma_chan);
 
        if (i == qp->rx_max_entry) {
@@ -1349,7 +1420,15 @@ static void ntb_tx_copy_callback(void *data)
 
 static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
 {
+#ifdef ARCH_HAS_NOCACHE_UACCESS
+       /*
+        * Using non-temporal mov to improve performance on non-cached
+        * writes, even though we aren't actually copying from user space.
+        */
+       __copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
+#else
        memcpy_toio(offset, entry->buf, entry->len);
+#endif
 
        /* Ensure that the data is fully copied out before setting the flags */
        wmb();
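
Whichever copy path is compiled in above, the wmb() between the payload copy and the flag write that follows is what lets the peer poll the flags safely: the payload must be globally visible before the "done" flag is. A userspace analogue of this publish pattern (not part of the patch), using C11 release/acquire in place of the kernel barriers; the frame layout and names are assumptions for illustration.

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

struct frame {
	char buf[64];		/* payload area */
	atomic_uint flags;	/* peer polls this; nonzero means "ready" */
};

static void producer(struct frame *f, const char *msg)
{
	memcpy(f->buf, msg, strlen(msg) + 1);
	/* release: the payload above is visible before flags becomes nonzero */
	atomic_store_explicit(&f->flags, 1, memory_order_release);
}

static int consumer(struct frame *f, char *out, size_t len)
{
	/* acquire pairs with the release store above */
	if (!atomic_load_explicit(&f->flags, memory_order_acquire))
		return 0;	/* not ready yet */
	strncpy(out, f->buf, len);
	return 1;
}

int main(void)
{
	struct frame f = { .flags = 0 };
	char out[64];

	producer(&f, "hello");
	if (consumer(&f, out, sizeof(out)))
		printf("peer read: %s\n", out);
	return 0;
}
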
@@ -1469,8 +1548,7 @@ static void ntb_send_link_down(struct ntb_transport_qp *qp)
        if (!qp->link_is_up)
                return;
 
-       qp->link_is_up = false;
-       dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
+       dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);
 
        for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
                entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
@@ -1491,6 +1569,13 @@ static void ntb_send_link_down(struct ntb_transport_qp *qp)
        if (rc)
                dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
                        qp->qp_num);
+
+       ntb_qp_link_down_reset(qp);
+}
+
+static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
+{
+       return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
 }
 
 /**
@@ -1518,12 +1603,16 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
        struct ntb_transport_qp *qp;
        u64 qp_bit;
        unsigned int free_queue;
+       dma_cap_mask_t dma_mask;
+       int node;
        int i;
 
        ndev = dev_ntb(client_dev->parent);
        pdev = ndev->pdev;
        nt = ndev->ctx;
 
+       node = dev_to_node(&ndev->dev);
+
        free_queue = ffs(nt->qp_bitmap);
        if (!free_queue)
                goto err;
@@ -1541,25 +1630,31 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
        qp->tx_handler = handlers->tx_handler;
        qp->event_handler = handlers->event_handler;
 
-       dmaengine_get();
-       qp->dma_chan = dma_find_channel(DMA_MEMCPY);
-       if (!qp->dma_chan) {
-               dmaengine_put();
-               dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
+       dma_cap_zero(dma_mask);
+       dma_cap_set(DMA_MEMCPY, dma_mask);
+
+       if (use_dma) {
+               qp->dma_chan = dma_request_channel(dma_mask, ntb_dma_filter_fn,
+                                                  (void *)(unsigned long)node);
+               if (!qp->dma_chan)
+                       dev_info(&pdev->dev, "Unable to allocate DMA channel\n");
+       } else {
+               qp->dma_chan = NULL;
        }
+       dev_dbg(&pdev->dev, "Using %s memcpy\n", qp->dma_chan ? "DMA" : "CPU");
 
        for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
-               entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+               entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
                if (!entry)
                        goto err1;
 
                entry->qp = qp;
-               ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
+               ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
                             &qp->rx_free_q);
        }
 
        for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
-               entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+               entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
                if (!entry)
                        goto err2;
 
@@ -1579,10 +1674,10 @@ err2:
        while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
                kfree(entry);
 err1:
-       while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+       while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
                kfree(entry);
        if (qp->dma_chan)
-               dmaengine_put();
+               dma_release_channel(qp->dma_chan);
        nt->qp_bitmap_free |= qp_bit;
 err:
        return NULL;
@@ -1597,7 +1692,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
  */
 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 {
-       struct ntb_transport_ctx *nt = qp->transport;
        struct pci_dev *pdev;
        struct ntb_queue_entry *entry;
        u64 qp_bit;
@@ -1619,7 +1713,7 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
                 */
                dma_sync_wait(chan, qp->last_cookie);
                dmaengine_terminate_all(chan);
-               dmaengine_put();
+               dma_release_channel(chan);
        }
 
        qp_bit = BIT_ULL(qp->qp_num);
@@ -1634,18 +1728,23 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
        qp->tx_handler = NULL;
        qp->event_handler = NULL;
 
-       while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+       while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
                kfree(entry);
 
-       while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
-               dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
+       while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
+               dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
+               kfree(entry);
+       }
+
+       while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
+               dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
                kfree(entry);
        }
 
        while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
                kfree(entry);
 
-       nt->qp_bitmap_free |= qp_bit;
+       qp->transport->qp_bitmap_free |= qp_bit;
 
        dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
 }
@@ -1669,14 +1768,14 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
        if (!qp || qp->client_ready)
                return NULL;
 
-       entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+       entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
        if (!entry)
                return NULL;
 
        buf = entry->cb_data;
        *len = entry->len;
 
-       ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+       ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
 
        return buf;
 }
@@ -1702,15 +1801,18 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
        if (!qp)
                return -EINVAL;
 
-       entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
+       entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
        if (!entry)
                return -ENOMEM;
 
        entry->cb_data = cb;
        entry->buf = data;
        entry->len = len;
+       entry->flags = 0;
+
+       ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
 
-       ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
+       tasklet_schedule(&qp->rxc_db_work);
 
        return 0;
 }
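
A hedged sketch (not part of the patch) of how a client is expected to drive this API, modeled on the pattern in drivers/net/ntb_netdev.c: pre-post a pool of receive buffers with ntb_transport_rx_enqueue(), then re-post each buffer from the rx handler once its contents are consumed. Everything prefixed my_ is illustrative, not part of the transport.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/ntb_transport.h>

#define MY_RX_RING 64
#define MY_BUF_LEN 2048

static void my_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
			  void *data, int len)
{
	/* consume 'data' (len bytes), then hand the buffer back to the qp */
	if (ntb_transport_rx_enqueue(qp, data, data, MY_BUF_LEN))
		kfree(data);	/* qp is going away; drop the buffer */
}

static int my_post_rx_pool(struct ntb_transport_qp *qp)
{
	void *buf;
	int i, rc;

	for (i = 0; i < MY_RX_RING; i++) {
		buf = kmalloc(MY_BUF_LEN, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* cb context and data buffer are the same allocation here */
		rc = ntb_transport_rx_enqueue(qp, buf, buf, MY_BUF_LEN);
		if (rc) {
			kfree(buf);
			return rc;
		}
	}
	return 0;
}
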
@@ -1903,6 +2005,8 @@ static int __init ntb_transport_init(void)
 {
        int rc;
 
+       pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);
+
        if (debugfs_initialized())
                nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);