/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Transport Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"

#define NTB_TRANSPORT_VERSION   4
#define NTB_TRANSPORT_VER       "4"
#define NTB_TRANSPORT_NAME      "ntb_transport"
#define NTB_TRANSPORT_DESC      "Software Queue-Pair Transport over NTB"

MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int transport_mtu = 0x10000;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

static bool use_dma;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");

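/*
 * Usage sketch (values purely illustrative): the knobs above can be set at
 * load time, e.g.
 *
 *   modprobe ntb_transport use_dma=1 copy_bytes=2048
 *
 * and, as they are registered with mode 0644, inspected or changed later
 * under /sys/module/ntb_transport/parameters/.
 */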
static struct dentry *nt_debugfs_dir;

struct ntb_queue_entry {
        /* ntb_queue list reference */
        struct list_head entry;
        /* pointers to data to be transferred */
        void *cb_data;
        void *buf;
        unsigned int len;
        unsigned int flags;

        struct ntb_transport_qp *qp;
        union {
                struct ntb_payload_header __iomem *tx_hdr;
                struct ntb_payload_header *rx_hdr;
        };
        unsigned int index;
};

struct ntb_rx_info {
        unsigned int entry;
};

struct ntb_transport_qp {
        struct ntb_transport_ctx *transport;
        struct ntb_dev *ndev;
        void *cb_data;
        struct dma_chan *dma_chan;

        bool client_ready;
        bool link_is_up;

        u8 qp_num;      /* Only 64 QPs are allowed.  0-63 */
        u64 qp_bit;

        struct ntb_rx_info __iomem *rx_info;
        struct ntb_rx_info *remote_rx_info;

        void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
                           void *data, int len);
        struct list_head tx_free_q;
        spinlock_t ntb_tx_free_q_lock;
        void __iomem *tx_mw;
        dma_addr_t tx_mw_phys;
        unsigned int tx_index;
        unsigned int tx_max_entry;
        unsigned int tx_max_frame;

        void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
                           void *data, int len);
        struct list_head rx_post_q;
        struct list_head rx_pend_q;
        struct list_head rx_free_q;
        /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
        spinlock_t ntb_rx_q_lock;
        void *rx_buff;
        unsigned int rx_index;
        unsigned int rx_max_entry;
        unsigned int rx_max_frame;
        dma_cookie_t last_cookie;
        struct tasklet_struct rxc_db_work;

        void (*event_handler)(void *data, int status);
        struct delayed_work link_work;
        struct work_struct link_cleanup;

        struct dentry *debugfs_dir;
        struct dentry *debugfs_stats;

        /* Stats */
        u64 rx_bytes;
        u64 rx_pkts;
        u64 rx_ring_empty;
        u64 rx_err_no_buf;
        u64 rx_err_oflow;
        u64 rx_err_ver;
        u64 rx_memcpy;
        u64 rx_async;
        u64 tx_bytes;
        u64 tx_pkts;
        u64 tx_ring_full;
        u64 tx_err_no_buf;
        u64 tx_memcpy;
        u64 tx_async;
};

struct ntb_transport_mw {
        phys_addr_t phys_addr;
        resource_size_t phys_size;
        resource_size_t xlat_align;
        resource_size_t xlat_align_size;
        void __iomem *vbase;
        size_t xlat_size;
        size_t buff_size;
        void *virt_addr;
        dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
        struct list_head entry;
        struct ntb_transport_ctx *nt;
        struct device dev;
};

struct ntb_transport_ctx {
        struct list_head entry;
        struct list_head client_devs;

        struct ntb_dev *ndev;

        struct ntb_transport_mw *mw_vec;
        struct ntb_transport_qp *qp_vec;
        unsigned int mw_count;
        unsigned int qp_count;
        u64 qp_bitmap;
        u64 qp_bitmap_free;

        bool link_is_up;
        struct delayed_work link_work;
        struct work_struct link_cleanup;

        struct dentry *debugfs_node_dir;
};

enum {
        DESC_DONE_FLAG = BIT(0),
        LINK_DOWN_FLAG = BIT(1),
};

struct ntb_payload_header {
        unsigned int ver;
        unsigned int len;
        unsigned int flags;
};
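/*
 * Ring slot layout, as laid out by ntb_transport_setup_qp_mw() and walked
 * by ntb_process_rxc(): every slot is rx_max_frame bytes, with the payload
 * in front and the header at the very end of the slot:
 *
 *   rx_buff + k * rx_max_frame                    payload of slot k
 *   rx_buff + (k + 1) * rx_max_frame
 *           - sizeof(struct ntb_payload_header)   header of slot k
 *
 * The receiver polls DESC_DONE_FLAG in the header, so the transmit side
 * writes the payload first and sets the header flags last.
 */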

enum {
        VERSION = 0,
        QP_LINKS,
        NUM_QPS,
        NUM_MWS,
        MW0_SZ_HIGH,
        MW0_SZ_LOW,
        MW1_SZ_HIGH,
        MW1_SZ_LOW,
        MAX_SPAD,
};

#define dev_client_dev(__dev) \
        container_of((__dev), struct ntb_transport_client_dev, dev)

#define drv_client(__drv) \
        container_of((__drv), struct ntb_transport_client, driver)

#define QP_TO_MW(nt, qp)        ((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES  100
#define NTB_LINK_DOWN_TIMEOUT   10
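/*
 * Worked example of the QP <-> MW mapping: with qp_count = 4 and
 * mw_count = 2, QP_TO_MW() puts qp0/qp2 on mw0 and qp1/qp3 on mw1, and
 * (qp_num / mw_count) is a QP's slot within its window:
 *
 *   qp0 -> mw0 slot 0       qp1 -> mw1 slot 0
 *   qp2 -> mw0 slot 1       qp3 -> mw1 slot 1
 */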

static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;

static int ntb_transport_bus_match(struct device *dev,
                                   struct device_driver *drv)
{
        return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_transport_bus_probe(struct device *dev)
{
        const struct ntb_transport_client *client;
        int rc = -EINVAL;

        get_device(dev);

        client = drv_client(dev->driver);
        rc = client->probe(dev);
        if (rc)
                put_device(dev);

        return rc;
}

static int ntb_transport_bus_remove(struct device *dev)
{
        const struct ntb_transport_client *client;

        client = drv_client(dev->driver);
        client->remove(dev);

        put_device(dev);

        return 0;
}

static struct bus_type ntb_transport_bus = {
        .name = "ntb_transport",
        .match = ntb_transport_bus_match,
        .probe = ntb_transport_bus_probe,
        .remove = ntb_transport_bus_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
        list_add_tail(&nt->entry, &ntb_transport_list);
        return 0;
}

static void ntb_bus_remove(struct ntb_transport_ctx *nt)
{
        struct ntb_transport_client_dev *client_dev, *cd;

        list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
                dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
                        dev_name(&client_dev->dev));
                list_del(&client_dev->entry);
                device_unregister(&client_dev->dev);
        }

        list_del(&nt->entry);
}

static void ntb_transport_client_release(struct device *dev)
{
        struct ntb_transport_client_dev *client_dev;

        client_dev = dev_client_dev(dev);
        kfree(client_dev);
}

/**
 * ntb_transport_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_transport_unregister_client_dev(char *device_name)
{
        struct ntb_transport_client_dev *client, *cd;
        struct ntb_transport_ctx *nt;

        list_for_each_entry(nt, &ntb_transport_list, entry)
                list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
                        if (!strncmp(dev_name(&client->dev), device_name,
                                     strlen(device_name))) {
                                list_del(&client->entry);
                                device_unregister(&client->dev);
                        }
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);

/**
 * ntb_transport_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_transport_register_client_dev(char *device_name)
{
        struct ntb_transport_client_dev *client_dev;
        struct ntb_transport_ctx *nt;
        int node;
        int rc, i = 0;

        if (list_empty(&ntb_transport_list))
                return -ENODEV;

        list_for_each_entry(nt, &ntb_transport_list, entry) {
                struct device *dev;

                node = dev_to_node(&nt->ndev->dev);

                client_dev = kzalloc_node(sizeof(*client_dev),
                                          GFP_KERNEL, node);
                if (!client_dev) {
                        rc = -ENOMEM;
                        goto err;
                }

                dev = &client_dev->dev;

                /* setup and register client devices */
                dev_set_name(dev, "%s%d", device_name, i);
                dev->bus = &ntb_transport_bus;
                dev->release = ntb_transport_client_release;
                dev->parent = &nt->ndev->dev;

                rc = device_register(dev);
                if (rc) {
                        kfree(client_dev);
                        goto err;
                }

                list_add_tail(&client_dev->entry, &nt->client_devs);
                i++;
        }

        return 0;

err:
        ntb_transport_unregister_client_dev(device_name);

        return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);

/**
 * ntb_transport_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_register_client(struct ntb_transport_client *drv)
{
        drv->driver.bus = &ntb_transport_bus;

        if (list_empty(&ntb_transport_list))
                return -ENODEV;

        return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client);

/**
 * ntb_transport_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 */
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
{
        driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);
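/*
 * Minimal sketch (not part of this driver) of how a client module pairs
 * the two halves of the API above: a client driver plus a matching named
 * client device, in the style of ntb_netdev.  The name "demo" and the
 * callbacks are hypothetical; the block is compiled out.
 */
#if 0
static int demo_probe(struct device *client_dev)
{
        /* bound to a "demo%d" device; queues would be created here with
         * ntb_transport_create_queue()
         */
        return 0;
}

static void demo_remove(struct device *client_dev)
{
}

static struct ntb_transport_client demo_client = {
        .driver = {
                .name = "demo",
                .owner = THIS_MODULE,
        },
        .probe = demo_probe,
        .remove = demo_remove,
};

static int __init demo_init(void)
{
        int rc;

        rc = ntb_transport_register_client_dev("demo");
        if (rc)
                return rc;

        rc = ntb_transport_register_client(&demo_client);
        if (rc)
                ntb_transport_unregister_client_dev("demo");

        return rc;
}
#endif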

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
                            loff_t *offp)
{
        struct ntb_transport_qp *qp;
        char *buf;
        ssize_t ret, out_offset, out_count;

        qp = filp->private_data;

        if (!qp || !qp->link_is_up)
                return 0;

        out_count = 1000;

        buf = kmalloc(out_count, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        out_offset = 0;
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "\nNTB QP stats:\n\n");
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_bytes - \t%llu\n", qp->rx_bytes);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_pkts - \t%llu\n", qp->rx_pkts);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_memcpy - \t%llu\n", qp->rx_memcpy);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_async - \t%llu\n", qp->rx_async);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_ring_empty - %llu\n", qp->rx_ring_empty);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_err_ver - \t%llu\n", qp->rx_err_ver);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_buff - \t0x%p\n", qp->rx_buff);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_index - \t%u\n", qp->rx_index);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_max_entry - \t%u\n\n", qp->rx_max_entry);

        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_bytes - \t%llu\n", qp->tx_bytes);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_pkts - \t%llu\n", qp->tx_pkts);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_memcpy - \t%llu\n", qp->tx_memcpy);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_async - \t%llu\n", qp->tx_async);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_ring_full - \t%llu\n", qp->tx_ring_full);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_mw - \t0x%p\n", qp->tx_mw);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_index (H) - \t%u\n", qp->tx_index);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "RRI (T) - \t%u\n",
                               qp->remote_rx_info->entry);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_max_entry - \t%u\n", qp->tx_max_entry);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "free tx - \t%u\n",
                               ntb_transport_tx_free_entry(qp));

        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "\n");
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "Using DMA - \t%s\n", use_dma ? "Yes" : "No");
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "QP Link - \t%s\n",
                               qp->link_is_up ? "Up" : "Down");
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "\n");

        if (out_offset > out_count)
                out_offset = out_count;

        ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
        kfree(buf);
        return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .read = debugfs_read,
};
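/*
 * The per-QP stats file is created in ntb_transport_init_queue(); assuming
 * debugfs is mounted at the usual place and the module's debugfs root is
 * named after NTB_TRANSPORT_NAME, it typically shows up as
 *
 *   /sys/kernel/debug/ntb_transport/<pci device name>/qp<N>/stats
 */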

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
                         struct list_head *list)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_add_tail(entry, list);
        spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
                                           struct list_head *list)
{
        struct ntb_queue_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        if (list_empty(list)) {
                entry = NULL;
                goto out;
        }
        entry = list_first_entry(list, struct ntb_queue_entry, entry);
        list_del(&entry->entry);

out:
        spin_unlock_irqrestore(lock, flags);

        return entry;
}

static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
                                           struct list_head *list,
                                           struct list_head *to_list)
{
        struct ntb_queue_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);

        if (list_empty(list)) {
                entry = NULL;
        } else {
                entry = list_first_entry(list, struct ntb_queue_entry, entry);
                list_move_tail(&entry->entry, to_list);
        }

        spin_unlock_irqrestore(lock, flags);

        return entry;
}

static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
                                     unsigned int qp_num)
{
        struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
        struct ntb_transport_mw *mw;
        unsigned int rx_size, num_qps_mw;
        unsigned int mw_num, mw_count, qp_count;
        unsigned int i;

        mw_count = nt->mw_count;
        qp_count = nt->qp_count;

        mw_num = QP_TO_MW(nt, qp_num);
        mw = &nt->mw_vec[mw_num];

        if (!mw->virt_addr)
                return -ENOMEM;

        if (mw_num < qp_count % mw_count)
                num_qps_mw = qp_count / mw_count + 1;
        else
                num_qps_mw = qp_count / mw_count;

        rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
        qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
        rx_size -= sizeof(struct ntb_rx_info);

        qp->remote_rx_info = qp->rx_buff + rx_size;

        /* Due to housekeeping, there must be at least 2 buffs */
        qp->rx_max_frame = min(transport_mtu, rx_size / 2);
        qp->rx_max_entry = rx_size / qp->rx_max_frame;
        qp->rx_index = 0;

        qp->remote_rx_info->entry = qp->rx_max_entry - 1;

        /* setup the hdr offsets with 0's */
        for (i = 0; i < qp->rx_max_entry; i++) {
                void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
                                sizeof(struct ntb_payload_header));
                memset(offset, 0, sizeof(struct ntb_payload_header));
        }

        qp->rx_pkts = 0;
        qp->tx_pkts = 0;
        qp->tx_index = 0;

        return 0;
}
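/*
 * Sizing example (illustrative numbers): a single QP on a 512 KiB window
 * gives rx_size = 524288 - sizeof(struct ntb_rx_info).  With the default
 * transport_mtu of 0x10000, rx_max_frame = 65536 and rx_max_entry = 7;
 * the "at least 2 buffs" clamp only bites when a window is smaller than
 * two MTU-sized frames.
 */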

static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
        struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
        struct pci_dev *pdev = nt->ndev->pdev;

        if (!mw->virt_addr)
                return;

        ntb_mw_clear_trans(nt->ndev, num_mw);
        dma_free_coherent(&pdev->dev, mw->buff_size,
                          mw->virt_addr, mw->dma_addr);
        mw->xlat_size = 0;
        mw->buff_size = 0;
        mw->virt_addr = NULL;
}

static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
                      resource_size_t size)
{
        struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
        struct pci_dev *pdev = nt->ndev->pdev;
        size_t xlat_size, buff_size;
        int rc;

        if (!size)
                return -EINVAL;

        xlat_size = round_up(size, mw->xlat_align_size);
        buff_size = round_up(size, mw->xlat_align);

        /* No need to re-setup */
        if (mw->xlat_size == xlat_size)
                return 0;

        if (mw->buff_size)
                ntb_free_mw(nt, num_mw);

        /* Alloc memory for receiving data.  Must be aligned */
        mw->xlat_size = xlat_size;
        mw->buff_size = buff_size;

        mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
                                           &mw->dma_addr, GFP_KERNEL);
        if (!mw->virt_addr) {
                mw->xlat_size = 0;
                mw->buff_size = 0;
                dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
                        buff_size);
                return -ENOMEM;
        }

        /*
         * we must ensure that the memory address allocated is BAR size
         * aligned in order for the XLAT register to take the value. This
         * is a requirement of the hardware. It is recommended to setup CMA
         * for BAR sizes equal or greater than 4MB.
         */
        if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
                dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
                        &mw->dma_addr);
                ntb_free_mw(nt, num_mw);
                return -ENOMEM;
        }

        /* Notify HW the memory location of the receive buffer */
        rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
        if (rc) {
                dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
                ntb_free_mw(nt, num_mw);
                return -EIO;
        }

        return 0;
}
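/*
 * Example of the trap the alignment check above guards against: on
 * hardware where xlat_align equals the BAR size, a peer-advertised 16 MiB
 * window needs a 16 MiB-aligned buffer, and dma_alloc_coherent() rarely
 * hands back that much alignment without CMA backing it.
 */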

static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{
        qp->link_is_up = false;

        qp->tx_index = 0;
        qp->rx_index = 0;
        qp->rx_bytes = 0;
        qp->rx_pkts = 0;
        qp->rx_ring_empty = 0;
        qp->rx_err_no_buf = 0;
        qp->rx_err_oflow = 0;
        qp->rx_err_ver = 0;
        qp->rx_memcpy = 0;
        qp->rx_async = 0;
        qp->tx_bytes = 0;
        qp->tx_pkts = 0;
        qp->tx_ring_full = 0;
        qp->tx_err_no_buf = 0;
        qp->tx_memcpy = 0;
        qp->tx_async = 0;
}

static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
        struct ntb_transport_ctx *nt = qp->transport;
        struct pci_dev *pdev = nt->ndev->pdev;

        dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);

        cancel_delayed_work_sync(&qp->link_work);
        ntb_qp_link_down_reset(qp);

        if (qp->event_handler)
                qp->event_handler(qp->cb_data, qp->link_is_up);
}

static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
        struct ntb_transport_qp *qp = container_of(work,
                                                   struct ntb_transport_qp,
                                                   link_cleanup);
        struct ntb_transport_ctx *nt = qp->transport;

        ntb_qp_link_cleanup(qp);

        if (nt->link_is_up)
                schedule_delayed_work(&qp->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
        schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
        struct ntb_transport_qp *qp;
        u64 qp_bitmap_alloc;
        int i;

        qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

        /* Pass along the info to any clients */
        for (i = 0; i < nt->qp_count; i++)
                if (qp_bitmap_alloc & BIT_ULL(i)) {
                        qp = &nt->qp_vec[i];
                        ntb_qp_link_cleanup(qp);
                        cancel_work_sync(&qp->link_cleanup);
                        cancel_delayed_work_sync(&qp->link_work);
                }

        if (!nt->link_is_up)
                cancel_delayed_work_sync(&nt->link_work);

        /* The scratchpad registers keep the values if the remote side
         * goes down, blast them now to give them a sane value the next
         * time they are accessed
         */
        for (i = 0; i < MAX_SPAD; i++)
                ntb_spad_write(nt->ndev, i, 0);
}

static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
        struct ntb_transport_ctx *nt =
                container_of(work, struct ntb_transport_ctx, link_cleanup);

        ntb_transport_link_cleanup(nt);
}

static void ntb_transport_event_callback(void *data)
{
        struct ntb_transport_ctx *nt = data;

        if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
                schedule_delayed_work(&nt->link_work, 0);
        else
                schedule_work(&nt->link_cleanup);
}

static void ntb_transport_link_work(struct work_struct *work)
{
        struct ntb_transport_ctx *nt =
                container_of(work, struct ntb_transport_ctx, link_work.work);
        struct ntb_dev *ndev = nt->ndev;
        struct pci_dev *pdev = ndev->pdev;
        resource_size_t size;
        u32 val;
        int rc, i, spad;

        /* send the local info, in the opposite order of the way we read it */
        for (i = 0; i < nt->mw_count; i++) {
                size = nt->mw_vec[i].phys_size;

                if (max_mw_size && size > max_mw_size)
                        size = max_mw_size;

                spad = MW0_SZ_HIGH + (i * 2);
                ntb_peer_spad_write(ndev, spad, (u32)(size >> 32));

                spad = MW0_SZ_LOW + (i * 2);
                ntb_peer_spad_write(ndev, spad, (u32)size);
        }

        ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);

        ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);

        ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);

        /* Query the remote side for its info */
        val = ntb_spad_read(ndev, VERSION);
        dev_dbg(&pdev->dev, "Remote version = %d\n", val);
        if (val != NTB_TRANSPORT_VERSION)
                goto out;

        val = ntb_spad_read(ndev, NUM_QPS);
        dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
        if (val != nt->qp_count)
                goto out;

        val = ntb_spad_read(ndev, NUM_MWS);
        dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
        if (val != nt->mw_count)
                goto out;

        for (i = 0; i < nt->mw_count; i++) {
                u64 val64;

                val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
                val64 = (u64)val << 32;

                val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
                val64 |= val;

                dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);

                rc = ntb_set_mw(nt, i, val64);
                if (rc)
                        goto out1;
        }

        nt->link_is_up = true;

        for (i = 0; i < nt->qp_count; i++) {
                struct ntb_transport_qp *qp = &nt->qp_vec[i];

                ntb_transport_setup_qp_mw(nt, i);

                if (qp->client_ready)
                        schedule_delayed_work(&qp->link_work, 0);
        }

        return;

out1:
        for (i = 0; i < nt->mw_count; i++)
                ntb_free_mw(nt, i);
out:
        if (ntb_link_is_up(ndev, NULL, NULL) == 1)
                schedule_delayed_work(&nt->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
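/*
 * Handshake example: with two 8 MiB windows, each side writes the peer's
 * scratchpads MW0_SZ_HIGH/LOW = 0/0x800000, MW1_SZ_HIGH/LOW = 0/0x800000,
 * then NUM_MWS = 2, NUM_QPS and finally VERSION, and reads its own copies
 * back in the reverse order.  Only when VERSION, NUM_QPS and NUM_MWS all
 * match the local values are the windows sized and the link declared up.
 */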

static void ntb_qp_link_work(struct work_struct *work)
{
        struct ntb_transport_qp *qp = container_of(work,
                                                   struct ntb_transport_qp,
                                                   link_work.work);
        struct pci_dev *pdev = qp->ndev->pdev;
        struct ntb_transport_ctx *nt = qp->transport;
        int val;

        WARN_ON(!nt->link_is_up);

        val = ntb_spad_read(nt->ndev, QP_LINKS);

        ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));

        /* query remote spad for qp ready bits */
        ntb_peer_spad_read(nt->ndev, QP_LINKS);
        dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);

        /* See if the remote side is up */
        if (val & BIT(qp->qp_num)) {
                dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
                qp->link_is_up = true;

                if (qp->event_handler)
                        qp->event_handler(qp->cb_data, qp->link_is_up);

                tasklet_schedule(&qp->rxc_db_work);
        } else if (nt->link_is_up)
                schedule_delayed_work(&qp->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
                                    unsigned int qp_num)
{
        struct ntb_transport_qp *qp;
        struct ntb_transport_mw *mw;
        phys_addr_t mw_base;
        resource_size_t mw_size;
        unsigned int num_qps_mw, tx_size;
        unsigned int mw_num, mw_count, qp_count;
        u64 qp_offset;

        mw_count = nt->mw_count;
        qp_count = nt->qp_count;

        mw_num = QP_TO_MW(nt, qp_num);
        mw = &nt->mw_vec[mw_num];

        qp = &nt->qp_vec[qp_num];
        qp->qp_num = qp_num;
        qp->transport = nt;
        qp->ndev = nt->ndev;
        qp->client_ready = false;
        qp->event_handler = NULL;
        ntb_qp_link_down_reset(qp);

        if (mw_num < qp_count % mw_count)
                num_qps_mw = qp_count / mw_count + 1;
        else
                num_qps_mw = qp_count / mw_count;

        mw_base = nt->mw_vec[mw_num].phys_addr;
        mw_size = nt->mw_vec[mw_num].phys_size;

        tx_size = (unsigned int)mw_size / num_qps_mw;
        qp_offset = tx_size * (qp_num / mw_count);

        qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
        if (!qp->tx_mw)
                return -EINVAL;

        qp->tx_mw_phys = mw_base + qp_offset;
        if (!qp->tx_mw_phys)
                return -EINVAL;

        tx_size -= sizeof(struct ntb_rx_info);
        qp->rx_info = qp->tx_mw + tx_size;

        /* Due to housekeeping, there must be at least 2 buffs */
        qp->tx_max_frame = min(transport_mtu, tx_size / 2);
        qp->tx_max_entry = tx_size / qp->tx_max_frame;

        if (nt->debugfs_node_dir) {
                char debugfs_name[8];

                snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
                qp->debugfs_dir = debugfs_create_dir(debugfs_name,
                                                     nt->debugfs_node_dir);

                qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
                                                        qp->debugfs_dir, qp,
                                                        &ntb_qp_debugfs_stats);
        } else {
                qp->debugfs_dir = NULL;
                qp->debugfs_stats = NULL;
        }

        INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
        INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);

        spin_lock_init(&qp->ntb_rx_q_lock);
        spin_lock_init(&qp->ntb_tx_free_q_lock);

        INIT_LIST_HEAD(&qp->rx_post_q);
        INIT_LIST_HEAD(&qp->rx_pend_q);
        INIT_LIST_HEAD(&qp->rx_free_q);
        INIT_LIST_HEAD(&qp->tx_free_q);

        tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
                     (unsigned long)qp);

        return 0;
}

static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
        struct ntb_transport_ctx *nt;
        struct ntb_transport_mw *mw;
        unsigned int mw_count, qp_count;
        u64 qp_bitmap;
        int node;
        int rc, i;

        if (ntb_db_is_unsafe(ndev))
                dev_dbg(&ndev->dev,
                        "doorbell is unsafe, proceed anyway...\n");
        if (ntb_spad_is_unsafe(ndev))
                dev_dbg(&ndev->dev,
                        "scratchpad is unsafe, proceed anyway...\n");

        node = dev_to_node(&ndev->dev);

        nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
        if (!nt)
                return -ENOMEM;

        nt->ndev = ndev;

        mw_count = ntb_mw_count(ndev);

        nt->mw_count = mw_count;

        nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
                                  GFP_KERNEL, node);
        if (!nt->mw_vec) {
                rc = -ENOMEM;
                goto err;
        }

        for (i = 0; i < mw_count; i++) {
                mw = &nt->mw_vec[i];

                rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
                                      &mw->xlat_align, &mw->xlat_align_size);
                if (rc)
                        goto err1;

                mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
                if (!mw->vbase) {
                        rc = -ENOMEM;
                        goto err1;
                }

                mw->buff_size = 0;
                mw->xlat_size = 0;
                mw->virt_addr = NULL;
                mw->dma_addr = 0;
        }

        qp_bitmap = ntb_db_valid_mask(ndev);

        qp_count = ilog2(qp_bitmap);
        if (max_num_clients && max_num_clients < qp_count)
                qp_count = max_num_clients;
        else if (mw_count < qp_count)
                qp_count = mw_count;

        qp_bitmap &= BIT_ULL(qp_count) - 1;

        nt->qp_count = qp_count;
        nt->qp_bitmap = qp_bitmap;
        nt->qp_bitmap_free = qp_bitmap;

        nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
                                  GFP_KERNEL, node);
        if (!nt->qp_vec) {
                rc = -ENOMEM;
                goto err2;
        }

        if (nt_debugfs_dir) {
                nt->debugfs_node_dir =
                        debugfs_create_dir(pci_name(ndev->pdev),
                                           nt_debugfs_dir);
        }

        for (i = 0; i < qp_count; i++) {
                rc = ntb_transport_init_queue(nt, i);
                if (rc)
                        goto err3;
        }

        INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
        INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

        rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
        if (rc)
                goto err3;

        INIT_LIST_HEAD(&nt->client_devs);
        rc = ntb_bus_init(nt);
        if (rc)
                goto err4;

        nt->link_is_up = false;
        ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
        ntb_link_event(ndev);

        return 0;

err4:
        ntb_clear_ctx(ndev);
err3:
        kfree(nt->qp_vec);
err2:
err1:
        while (i--) {
                mw = &nt->mw_vec[i];
                iounmap(mw->vbase);
        }
        /* free mw_vec only after the iounmap loop is done with it */
        kfree(nt->mw_vec);
err:
        kfree(nt);
        return rc;
}

static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
{
        struct ntb_transport_ctx *nt = ndev->ctx;
        struct ntb_transport_qp *qp;
        u64 qp_bitmap_alloc;
        int i;

        ntb_transport_link_cleanup(nt);
        cancel_work_sync(&nt->link_cleanup);
        cancel_delayed_work_sync(&nt->link_work);

        qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

        /* verify that all the qp's are freed */
        for (i = 0; i < nt->qp_count; i++) {
                qp = &nt->qp_vec[i];
                if (qp_bitmap_alloc & BIT_ULL(i))
                        ntb_transport_free_queue(qp);
                debugfs_remove_recursive(qp->debugfs_dir);
        }

        ntb_link_disable(ndev);
        ntb_clear_ctx(ndev);

        ntb_bus_remove(nt);

        for (i = nt->mw_count; i--; ) {
                ntb_free_mw(nt, i);
                iounmap(nt->mw_vec[i].vbase);
        }

        kfree(nt->qp_vec);
        kfree(nt->mw_vec);
        kfree(nt);
}

static void ntb_complete_rxc(struct ntb_transport_qp *qp)
{
        struct ntb_queue_entry *entry;
        void *cb_data;
        unsigned int len;
        unsigned long irqflags;

        spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);

        while (!list_empty(&qp->rx_post_q)) {
                entry = list_first_entry(&qp->rx_post_q,
                                         struct ntb_queue_entry, entry);
                if (!(entry->flags & DESC_DONE_FLAG))
                        break;

                entry->rx_hdr->flags = 0;
                iowrite32(entry->index, &qp->rx_info->entry);

                cb_data = entry->cb_data;
                len = entry->len;

                list_move_tail(&entry->entry, &qp->rx_free_q);

                spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);

                if (qp->rx_handler && qp->client_ready)
                        qp->rx_handler(qp, qp->cb_data, cb_data, len);

                spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
        }

        spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
}

static void ntb_rx_copy_callback(void *data)
{
        struct ntb_queue_entry *entry = data;

        entry->flags |= DESC_DONE_FLAG;

        ntb_complete_rxc(entry->qp);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
        void *buf = entry->buf;
        size_t len = entry->len;

        memcpy(buf, offset, len);

        /* Ensure that the data is fully copied out before clearing the flag */
        wmb();

        ntb_rx_copy_callback(entry);
}

static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
{
        struct dma_async_tx_descriptor *txd;
        struct ntb_transport_qp *qp = entry->qp;
        struct dma_chan *chan = qp->dma_chan;
        struct dma_device *device;
        size_t pay_off, buff_off, len;
        struct dmaengine_unmap_data *unmap;
        dma_cookie_t cookie;
        void *buf = entry->buf;

        len = entry->len;

        if (!chan)
                goto err;

        if (len < copy_bytes)
                goto err_wait;

        device = chan->device;
        pay_off = (size_t)offset & ~PAGE_MASK;
        buff_off = (size_t)buf & ~PAGE_MASK;

        if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
                goto err_wait;

        unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
        if (!unmap)
                goto err_wait;

        unmap->len = len;
        unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
                                      pay_off, len, DMA_TO_DEVICE);
        if (dma_mapping_error(device->dev, unmap->addr[0]))
                goto err_get_unmap;

        unmap->to_cnt = 1;

        unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
                                      buff_off, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(device->dev, unmap->addr[1]))
                goto err_get_unmap;

        unmap->from_cnt = 1;

        txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
                                             unmap->addr[0], len,
                                             DMA_PREP_INTERRUPT);
        if (!txd)
                goto err_get_unmap;

        txd->callback = ntb_rx_copy_callback;
        txd->callback_param = entry;
        dma_set_unmap(txd, unmap);

        cookie = dmaengine_submit(txd);
        if (dma_submit_error(cookie))
                goto err_set_unmap;

        dmaengine_unmap_put(unmap);

        qp->last_cookie = cookie;

        qp->rx_async++;

        return;

err_set_unmap:
        dmaengine_unmap_put(unmap);
err_get_unmap:
        dmaengine_unmap_put(unmap);
err_wait:
        /* If the callbacks come out of order, the writing of the index to the
         * last completed will be out of order.  This may result in the
         * receive stalling forever.
         */
        dma_sync_wait(chan, qp->last_cookie);
err:
        ntb_memcpy_rx(entry, offset);
        qp->rx_memcpy++;
}
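/*
 * Ordering note, with the default copy_bytes = 1024 as an example: a
 * 512-byte frame takes the CPU path above, but only after dma_sync_wait()
 * on the last submitted cookie, so a small frame cannot complete ahead of
 * a larger DMA'd frame that precedes it in the ring.
 */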

static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
        struct ntb_payload_header *hdr;
        struct ntb_queue_entry *entry;
        void *offset;

        offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
        hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

        dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
                qp->qp_num, hdr->ver, hdr->len, hdr->flags);

        if (!(hdr->flags & DESC_DONE_FLAG)) {
                dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
                qp->rx_ring_empty++;
                return -EAGAIN;
        }

        if (hdr->flags & LINK_DOWN_FLAG) {
                dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
                ntb_qp_link_down(qp);
                hdr->flags = 0;
                return -EAGAIN;
        }

        if (hdr->ver != (u32)qp->rx_pkts) {
                dev_dbg(&qp->ndev->pdev->dev,
                        "version mismatch, expected %llu - got %u\n",
                        qp->rx_pkts, hdr->ver);
                qp->rx_err_ver++;
                return -EIO;
        }

        entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
        if (!entry) {
                dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
                qp->rx_err_no_buf++;
                return -EAGAIN;
        }

        entry->rx_hdr = hdr;
        entry->index = qp->rx_index;

        if (hdr->len > entry->len) {
                dev_dbg(&qp->ndev->pdev->dev,
                        "receive buffer overflow! Wanted %d got %d\n",
                        hdr->len, entry->len);
                qp->rx_err_oflow++;

                entry->len = -EIO;
                entry->flags |= DESC_DONE_FLAG;

                ntb_complete_rxc(qp);
        } else {
                dev_dbg(&qp->ndev->pdev->dev,
                        "RX OK index %u ver %u size %d into buf size %d\n",
                        qp->rx_index, hdr->ver, hdr->len, entry->len);

                qp->rx_bytes += hdr->len;
                qp->rx_pkts++;

                entry->len = hdr->len;

                ntb_async_rx(entry, offset);
        }

        qp->rx_index++;
        qp->rx_index %= qp->rx_max_entry;

        return 0;
}

static void ntb_transport_rxc_db(unsigned long data)
{
        struct ntb_transport_qp *qp = (void *)data;
        int rc, i;

        dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
                __func__, qp->qp_num);

        /* Limit the number of packets processed in a single interrupt to
         * provide fairness to others
         */
        for (i = 0; i < qp->rx_max_entry; i++) {
                rc = ntb_process_rxc(qp);
                if (rc)
                        break;
        }

        if (i && qp->dma_chan)
                dma_async_issue_pending(qp->dma_chan);

        if (i == qp->rx_max_entry) {
                /* there is more work to do */
                tasklet_schedule(&qp->rxc_db_work);
        } else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
                /* the doorbell bit is set: clear it */
                ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
                /* ntb_db_read ensures ntb_db_clear write is committed */
                ntb_db_read(qp->ndev);

                /* an interrupt may have arrived between finishing
                 * ntb_process_rxc and clearing the doorbell bit:
                 * there might be some more work to do.
                 */
                tasklet_schedule(&qp->rxc_db_work);
        }
}

static void ntb_tx_copy_callback(void *data)
{
        struct ntb_queue_entry *entry = data;
        struct ntb_transport_qp *qp = entry->qp;
        struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

        iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

        ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));

        /* The entry length can only be zero if the packet is intended to be a
         * "link down" or similar.  Since no payload is being sent in these
         * cases, there is nothing to add to the completion queue.
         */
        if (entry->len > 0) {
                qp->tx_bytes += entry->len;

                if (qp->tx_handler)
                        qp->tx_handler(qp, qp->cb_data, entry->cb_data,
                                       entry->len);
        }

        ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
#ifdef ARCH_HAS_NOCACHE_UACCESS
        /*
         * Using non-temporal mov to improve performance on non-cached
         * writes, even though we aren't actually copying from user space.
         */
        __copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
#else
        memcpy_toio(offset, entry->buf, entry->len);
#endif

        /* Ensure that the data is fully copied out before setting the flags */
        wmb();

        ntb_tx_copy_callback(entry);
}

static void ntb_async_tx(struct ntb_transport_qp *qp,
                         struct ntb_queue_entry *entry)
{
        struct ntb_payload_header __iomem *hdr;
        struct dma_async_tx_descriptor *txd;
        struct dma_chan *chan = qp->dma_chan;
        struct dma_device *device;
        size_t dest_off, buff_off;
        struct dmaengine_unmap_data *unmap;
        dma_addr_t dest;
        dma_cookie_t cookie;
        void __iomem *offset;
        size_t len = entry->len;
        void *buf = entry->buf;

        offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
        hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
        entry->tx_hdr = hdr;

        iowrite32(entry->len, &hdr->len);
        iowrite32((u32)qp->tx_pkts, &hdr->ver);

        if (!chan)
                goto err;

        if (len < copy_bytes)
                goto err;

        device = chan->device;
        dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
        buff_off = (size_t)buf & ~PAGE_MASK;
        dest_off = (size_t)dest & ~PAGE_MASK;

        if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
                goto err;

        unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
        if (!unmap)
                goto err;

        unmap->len = len;
        unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
                                      buff_off, len, DMA_TO_DEVICE);
        if (dma_mapping_error(device->dev, unmap->addr[0]))
                goto err_get_unmap;

        unmap->to_cnt = 1;

        txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
                                             DMA_PREP_INTERRUPT);
        if (!txd)
                goto err_get_unmap;

        txd->callback = ntb_tx_copy_callback;
        txd->callback_param = entry;
        dma_set_unmap(txd, unmap);

        cookie = dmaengine_submit(txd);
        if (dma_submit_error(cookie))
                goto err_set_unmap;

        dmaengine_unmap_put(unmap);

        dma_async_issue_pending(chan);
        qp->tx_async++;

        return;
err_set_unmap:
        dmaengine_unmap_put(unmap);
err_get_unmap:
        dmaengine_unmap_put(unmap);
err:
        ntb_memcpy_tx(entry, offset);
        qp->tx_memcpy++;
}

static int ntb_process_tx(struct ntb_transport_qp *qp,
                          struct ntb_queue_entry *entry)
{
        if (qp->tx_index == qp->remote_rx_info->entry) {
                qp->tx_ring_full++;
                return -EAGAIN;
        }

        if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
                if (qp->tx_handler)
                        qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

                ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
                             &qp->tx_free_q);
                return 0;
        }

        ntb_async_tx(qp, entry);

        qp->tx_index++;
        qp->tx_index %= qp->tx_max_entry;

        qp->tx_pkts++;

        return 0;
}
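/*
 * Ring-full example: remote_rx_info->entry mirrors the slot the peer last
 * consumed and starts at rx_max_entry - 1.  With tx_max_entry = 8, seven
 * sends fill slots 0..6; the eighth finds tx_index == remote_rx_info->entry
 * and returns -EAGAIN until the peer advances its index.
 */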

static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
        struct pci_dev *pdev = qp->ndev->pdev;
        struct ntb_queue_entry *entry;
        int i, rc;

        if (!qp->link_is_up)
                return;

        dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);

        for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
                entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
                if (entry)
                        break;
                msleep(100);
        }

        if (!entry)
                return;

        entry->cb_data = NULL;
        entry->buf = NULL;
        entry->len = 0;
        entry->flags = LINK_DOWN_FLAG;

        rc = ntb_process_tx(qp, entry);
        if (rc)
                dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
                        qp->qp_num);

        ntb_qp_link_down_reset(qp);
}

static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
{
        return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data
 * @client_dev: &struct device pointer of the NTB client
 * @handlers: receive, transmit, and event callback functions
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct device *client_dev,
                           const struct ntb_queue_handlers *handlers)
{
        struct ntb_dev *ndev;
        struct pci_dev *pdev;
        struct ntb_transport_ctx *nt;
        struct ntb_queue_entry *entry;
        struct ntb_transport_qp *qp;
        u64 qp_bit;
        unsigned int free_queue;
        dma_cap_mask_t dma_mask;
        int node;
        int i;

        ndev = dev_ntb(client_dev->parent);
        pdev = ndev->pdev;
        nt = ndev->ctx;

        node = dev_to_node(&ndev->dev);

        free_queue = ffs(nt->qp_bitmap_free);
        if (!free_queue)
                goto err;

        /* decrement free_queue to make it zero based */
        free_queue--;

        qp = &nt->qp_vec[free_queue];
        qp_bit = BIT_ULL(qp->qp_num);

        nt->qp_bitmap_free &= ~qp_bit;

        qp->cb_data = data;
        qp->rx_handler = handlers->rx_handler;
        qp->tx_handler = handlers->tx_handler;
        qp->event_handler = handlers->event_handler;

        dma_cap_zero(dma_mask);
        dma_cap_set(DMA_MEMCPY, dma_mask);

        if (use_dma) {
                qp->dma_chan = dma_request_channel(dma_mask, ntb_dma_filter_fn,
                                                   (void *)(unsigned long)node);
                if (!qp->dma_chan)
                        dev_info(&pdev->dev, "Unable to allocate DMA channel\n");
        } else {
                qp->dma_chan = NULL;
        }
        dev_dbg(&pdev->dev, "Using %s memcpy\n", qp->dma_chan ? "DMA" : "CPU");

        for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
                entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
                if (!entry)
                        goto err1;

                entry->qp = qp;
                ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
                             &qp->rx_free_q);
        }

        for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
                entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
                if (!entry)
                        goto err2;

                entry->qp = qp;
                ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
                             &qp->tx_free_q);
        }

        ntb_db_clear(qp->ndev, qp_bit);
        ntb_db_clear_mask(qp->ndev, qp_bit);

        dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

        return qp;

err2:
        while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
                kfree(entry);
err1:
        while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
                kfree(entry);
        if (qp->dma_chan)
                dma_release_channel(qp->dma_chan);
        nt->qp_bitmap_free |= qp_bit;
err:
        return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);

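/*
 * Usage sketch (illustrative only, not part of this driver): a client
 * would typically create its queue from a probe routine.  The my_* names
 * below are hypothetical placeholders, but the handler signatures follow
 * struct ntb_queue_handlers in include/linux/ntb_transport.h.
 *
 *	static void my_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
 *				  void *data, int len)
 *	{
 *		...consume data, then re-post the buffer with
 *		ntb_transport_rx_enqueue()...
 *	}
 *
 *	static const struct ntb_queue_handlers my_handlers = {
 *		.rx_handler	= my_rx_handler,
 *		.tx_handler	= my_tx_handler,
 *		.event_handler	= my_event_handler,
 *	};
 *
 *	qp = ntb_transport_create_queue(my_ctx, client_dev, &my_handlers);
 *	if (!qp)
 *		return -EIO;
 */
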
/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees the NTB transport queue and all of its queue entries
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
        struct pci_dev *pdev;
        struct ntb_queue_entry *entry;
        u64 qp_bit;

        if (!qp)
                return;

        pdev = qp->ndev->pdev;

        if (qp->dma_chan) {
                struct dma_chan *chan = qp->dma_chan;
                /* Setting qp->dma_chan to NULL forces any new traffic to be
                 * processed by the CPU instead of the DMA engine
                 */
                qp->dma_chan = NULL;

                /* Try to be nice and wait for any queued DMA engine
                 * transactions to process before smashing it with a rock
                 */
                dma_sync_wait(chan, qp->last_cookie);
                dmaengine_terminate_all(chan);
                dma_release_channel(chan);
        }

        qp_bit = BIT_ULL(qp->qp_num);

        ntb_db_set_mask(qp->ndev, qp_bit);
        tasklet_disable(&qp->rxc_db_work);

        cancel_delayed_work_sync(&qp->link_work);

        qp->cb_data = NULL;
        qp->rx_handler = NULL;
        qp->tx_handler = NULL;
        qp->event_handler = NULL;

        while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
                kfree(entry);

        while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
                dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
                kfree(entry);
        }

        while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
                dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
                kfree(entry);
        }

        while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
                kfree(entry);

        qp->transport->qp_bitmap_free |= qp_bit;

        dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB queue from which to remove the buffer
 * @len: pointer to variable to write the enqueued buffer's length
 *
 * Dequeues unused buffers from receive queue.  Should only be used during
 * shutdown of qp.
 *
 * RETURNS: NULL on error, or a pointer to the dequeued buffer on success.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
        struct ntb_queue_entry *entry;
        void *buf;

        if (!qp || qp->client_ready)
                return NULL;

        entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
        if (!entry)
                return NULL;

        buf = entry->cb_data;
        *len = entry->len;

        ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);

        return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);

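/*
 * Shutdown sketch (illustrative): once the client has called
 * ntb_transport_link_down(), any rx buffers still pending can be
 * reclaimed before the queue itself is freed.  Ownership of each
 * returned cb pointer goes back to the caller, which frees it.
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = ntb_transport_rx_remove(qp, &len)))
 *		kfree(buf);
 *	ntb_transport_free_queue(qp);
 */
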
/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue, into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
                             unsigned int len)
{
        struct ntb_queue_entry *entry;

        if (!qp)
                return -EINVAL;

        entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
        if (!entry)
                return -ENOMEM;

        entry->cb_data = cb;
        entry->buf = data;
        entry->len = len;
        entry->flags = 0;

        ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);

        tasklet_schedule(&qp->rxc_db_work);

        return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);

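/*
 * Posting sketch (illustrative): receive buffers are normally pre-posted
 * in bulk before calling ntb_transport_link_up(), then re-posted from the
 * rx handler as they are consumed.  NUM_RX_BUFS and MY_BUF_LEN are
 * hypothetical client constants.
 *
 *	for (i = 0; i < NUM_RX_BUFS; i++) {
 *		buf = kmalloc(MY_BUF_LEN, GFP_KERNEL);
 *		if (!buf)
 *			break;
 *		rc = ntb_transport_rx_enqueue(qp, buf, buf, MY_BUF_LEN);
 *		if (rc) {
 *			kfree(buf);
 *			break;
 *		}
 *	}
 */
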
/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
                             unsigned int len)
{
        struct ntb_queue_entry *entry;
        int rc;

        if (!qp || !qp->link_is_up || !len)
                return -EINVAL;

        entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
        if (!entry) {
                qp->tx_err_no_buf++;
                return -EBUSY;
        }

        entry->cb_data = cb;
        entry->buf = data;
        entry->len = len;
        entry->flags = 0;

        rc = ntb_process_tx(qp, entry);
        if (rc)
                ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
                             &qp->tx_free_q);

        return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);

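/*
 * Transmit sketch (illustrative): -EAGAIN from ntb_process_tx() means the
 * tx ring is full, so a client usually pauses submissions until its tx
 * handler reports completions; -EBUSY means no free tx entries remain,
 * and -EINVAL covers a bad qp, a down link, or a zero length.
 *
 *	rc = ntb_transport_tx_enqueue(qp, ctx, data, len);
 *	if (rc == -EAGAIN || rc == -EBUSY)
 *		...stop queueing and retry after a tx completion...
 *	else if (rc)
 *		...unrecoverable: drop the packet...
 */
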
/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
        if (!qp)
                return;

        qp->client_ready = true;

        if (qp->transport->link_is_up)
                schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
        struct pci_dev *pdev;
        int val;

        if (!qp)
                return;

        pdev = qp->ndev->pdev;
        qp->client_ready = false;

        /* Withdraw this qp's ready bit from the QP_LINKS word the peer sees */
        val = ntb_spad_read(qp->ndev, QP_LINKS);

        ntb_peer_spad_write(qp->ndev, QP_LINKS,
                            val & ~BIT(qp->qp_num));

        if (qp->link_is_up)
                ntb_send_link_down(qp);
        else
                cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
        if (!qp)
                return false;

        return qp->link_is_up;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
        if (!qp)
                return 0;

        return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
        unsigned int max;

        if (!qp)
                return 0;

        if (!qp->dma_chan)
                return qp->tx_max_frame - sizeof(struct ntb_payload_header);

        /* If DMA engine usage is possible, try to find the max size for that */
        max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
        max -= max % (1 << qp->dma_chan->device->copy_align);

        return max;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);

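/*
 * Worked example (illustrative numbers, assuming a 12-byte
 * ntb_payload_header): with tx_max_frame = 0x10000, the CPU path would
 * report 65536 - 12 = 65524 bytes.  A DMA engine advertising
 * copy_align = 3 (8-byte units) would trim that to
 * 65524 - (65524 % 8) = 65520 bytes.
 */
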
unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
{
        /* Count the entries between the local tx index (head) and the
         * entry index last advertised by the remote rx side (tail),
         * wrapping around tx_max_entry.
         */
        unsigned int head = qp->tx_index;
        unsigned int tail = qp->remote_rx_info->entry;

        return tail > head ? tail - head : qp->tx_max_entry + tail - head;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);

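/*
 * Worked example: with tx_max_entry = 16, a head (tx_index) of 10 and a
 * tail (remote_rx_info->entry) of 4 gives 16 + 4 - 10 = 10 free entries;
 * with head = 2 and the same tail, tail > head and 4 - 2 = 2 entries are
 * free.
 */
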
static void ntb_transport_doorbell_callback(void *data, int vector)
{
        struct ntb_transport_ctx *nt = data;
        struct ntb_transport_qp *qp;
        u64 db_bits;
        unsigned int qp_num;

        /* Only consider qps that exist, are in use, and are mapped to the
         * interrupting vector.
         */
        db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
                   ntb_db_vector_mask(nt->ndev, vector));

        while (db_bits) {
                qp_num = __ffs(db_bits);
                qp = &nt->qp_vec[qp_num];

                tasklet_schedule(&qp->rxc_db_work);

                db_bits &= ~BIT_ULL(qp_num);
        }
}

static const struct ntb_ctx_ops ntb_transport_ops = {
        .link_event = ntb_transport_event_callback,
        .db_event = ntb_transport_doorbell_callback,
};

static struct ntb_client ntb_transport_client = {
        .ops = {
                .probe = ntb_transport_probe,
                .remove = ntb_transport_free,
        },
};

static int __init ntb_transport_init(void)
{
        int rc;

        pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);

        if (debugfs_initialized())
                nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

        rc = bus_register(&ntb_transport_bus);
        if (rc)
                goto err_bus;

        rc = ntb_register_client(&ntb_transport_client);
        if (rc)
                goto err_client;

        return 0;

err_client:
        bus_unregister(&ntb_transport_bus);
err_bus:
        debugfs_remove_recursive(nt_debugfs_dir);
        return rc;
}
module_init(ntb_transport_init);

static void __exit ntb_transport_exit(void)
{
        debugfs_remove_recursive(nt_debugfs_dir);

        ntb_unregister_client(&ntb_transport_client);
        bus_unregister(&ntb_transport_bus);
}
module_exit(ntb_transport_exit);