/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
enum {
	MAX_PENDING_REG_MR = 8,
};
#define MLX5_UMR_ALIGN 2048
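/*
 * Connect-IB requires the page list handed to a UMR work request to be 2 KB
 * aligned; reg_umr() and mlx5_ib_update_mtt() below align their pas buffers
 * to this value.
 */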
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
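/*
 * Static fallback buffer for ODP MTT updates: mlx5_ib_update_mtt() normally
 * allocates a zeroed page with GFP_ATOMIC, but when that fails (it may run
 * from invalidation context) it falls back to this preallocated chunk,
 * serialized by the mutex below.
 */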
static __be64 mlx5_ib_update_mtt_emergency_buffer[
		MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)]
	__aligned(MLX5_UMR_ALIGN);
static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
#endif
static int clean_mr(struct mlx5_ib_mr *mr);

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif
	return err;
}
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}
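/*
 * The MR cache is indexed by order relative to the smallest cached order:
 * ent[0] holds the smallest MRs and larger orders map linearly onto higher
 * indices.
 */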
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;

	spin_lock_irqsave(&ent->lock, flags);
	spin_unlock_irqrestore(&ent->lock, flags);
	mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
	mod_timer(&dev->delay_timer, jiffies + HZ);

	if (mr->out.hdr.status) {
		mlx5_ib_warn(dev, "failed - status %d, syndrome 0x%x\n",
			     mr->out.hdr.status,
			     be32_to_cpu(mr->out.hdr.syndrome));
		mod_timer(&dev->delay_timer, jiffies + HZ);

	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
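	/*
	 * The key assembled above combines the 24-bit index returned by the
	 * firmware (shifted up by mlx5_idx_to_mkey()) with a driver-chosen low
	 * "variant" byte taken from the global mkey_key counter, so successive
	 * owners of the same index observe different key values.
	 */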
	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key),
				&mr->mmr);
	if (err)
		pr_err("Error inserting to mr tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);
}
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int npages = 1 << ent->order;

	in = kzalloc(sizeof(*in), GFP_KERNEL);

	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);

		mr->order = ent->order;
		in->seg.status = MLX5_MKEY_STATUS_FREE;
		in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
		in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
		in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
		in->seg.log2_page_size = 12;
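		/*
		 * Cached mkeys are created in the free state with UMR enabled;
		 * the 4 KB log2_page_size above appears to be just an initial
		 * value, since reg_umr() later programs the real page shift,
		 * address and length when the mkey is handed out.
		 */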
		spin_lock_irq(&ent->lock);
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in,
					    sizeof(*in), reg_mr_callback,
					    mr, &mr->out);
		spin_lock_irq(&ent->lock);
		spin_unlock_irq(&ent->lock);
		mlx5_ib_warn(dev, "create mkey failed %d\n", err);
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		err = add_keys(dev, c, var - ent->size);
		if (err && err != -EAGAIN)
			return err;

		usleep_range(3000, 5000);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}
static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.write	= size_write,
	.read	= size_read,
};
static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.write	= limit_write,
	.read	= limit_read,
};
static int someone_adding(struct mlx5_mr_cache *cache)
{
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}
	return 0;
}
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		if (!someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
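			/*
			 * Shrink policy: an entry above twice its limit is
			 * trimmed one mkey at a time, and only when no entry is
			 * below its limit and nothing has been added to the
			 * cache for 300 seconds; otherwise the check is simply
			 * retried later.
			 */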
static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;

	c = order2idx(dev, order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;
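	/*
	 * The miss counter above (exported through the debugfs "miss" file)
	 * records that no free mkey was found in this or any larger cache
	 * entry; the caller then falls back to replenishing the cache via
	 * add_keys().
	 */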
static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}
	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;

	cancel_delayed_work(&ent->dwork);
	spin_lock_irq(&ent->lock);
	if (list_empty(&ent->head)) {
		spin_unlock_irq(&ent->lock);
		return;
	}
	mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
	spin_unlock_irq(&ent->lock);
	err = destroy_mkey(dev, mr);
	if (err)
		mlx5_ib_warn(dev, "failed destroy mkey\n");
static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;

	if (!mlx5_debugfs_root)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
	}

	return 0;
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->cache.root);
}
static void delay_time_func(unsigned long ctx)
{
	struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;

	dev->fill_delay = 0;
}
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;

	cache->wq = create_singlethread_workqueue("mkey_cache");
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		INIT_LIST_HEAD(&cache->ent[i].head);
		spin_lock_init(&cache->ent[i].lock);

		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);

		if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
			limit = dev->mdev->profile->mr_cache[i].limit;

		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		queue_work(cache->wq, &ent->work);
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	return 0;
}
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);

	return 0;
}
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_mkey_seg *seg;
	struct mlx5_ib_mr *mr;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);

	seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
	seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);

	err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL,
				    NULL);

	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;
static int get_octo_len(u64 addr, u64 len, int page_size)
{
	u64 offset;
	int npages;
	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
	return (npages + 1) / 2;
}
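/*
 * get_octo_len(): translation entries are 8 bytes each and the hardware
 * counts them in 16-byte octwords, hence two page entries per octword,
 * rounded up.
 */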
static int use_umr(int order)
{
	return order <= MLX5_MAX_UMR_SHIFT;
}
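/*
 * Registrations of up to 2^MLX5_MAX_UMR_SHIFT pages take a pre-created mkey
 * from the MR cache and program it through a UMR work request (reg_umr());
 * larger regions fall back to reg_create(), which issues a blocking
 * CREATE_MKEY command carrying the full page list inline.
 */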
static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
			     struct ib_sge *sg, u64 dma, int n, u32 key,
			     int page_shift, u64 virt_addr, u64 len,
			     int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;

	sg->length = ALIGN(sizeof(u64) * n, 64);
	sg->lkey = dev->umrc.pd->local_dma_lkey;

	wr->opcode = MLX5_IB_WR_UMR;

	umrwr->page_shift = page_shift;
	umrwr->target.virt_addr = virt_addr;
	umrwr->access_flags = access_flags;
}

static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
			       struct ib_send_wr *wr, u32 key)
{
	struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;

	wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr->opcode = MLX5_IB_WR_UMR;
	umrwr->mkey = key;
}
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
{
	struct mlx5_ib_umr_context *context;
	struct ib_wc wc;
	int err;

	err = ib_poll_cq(cq, 1, &wc);
	if (err < 0) {
		pr_warn("poll cq error %d\n", err);
		return;
	}

	context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id;
	context->status = wc.status;
	complete(&context->done);

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}
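/*
 * UMR completion protocol: each UMR work request stores, in wr_id, the
 * address of an on-stack mlx5_ib_umr_context. The handler above copies the
 * completion status into that context and signals its completion, waking
 * the poster blocked in wait_for_completion(); the CQ is then re-armed
 * with ib_req_notify_cq().
 */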
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_send_wr wr, *bad;
	struct mlx5_ib_mr *mr;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the pas array, we allocate
	 * a little more. */
	size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
	mr_pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);

	pas = PTR_ALIGN(mr_pas, MLX5_UMR_ALIGN);
	mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
	/* Clear padding after the actual pages. */
	memset(pas + npages, 0, size - npages * sizeof(u64));

	dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {

	memset(&wr, 0, sizeof(wr));
	wr.wr_id = (u64)(unsigned long)&umr_context;
	prep_umr_reg_wqe(pd, &wr, &sg, dma, npages, mr->mmr.key, page_shift,
			 virt_addr, len, access_flags);

	mlx5_ib_init_umr_context(&umr_context);

	err = ib_post_send(umrc->qp, &wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);

	wait_for_completion(&umr_context.done);
	if (umr_context.status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "reg umr failed\n");

	mr->mmr.iova = virt_addr;
	mr->mmr.pd = to_mpd(pd)->pdn;

	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

	free_cached_mr(dev, mr);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_umem *umem = mr->umem;
	struct ib_send_wr wr, *bad;
	struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr.wr.fast_reg;
	const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
	const int page_index_mask = page_index_alignment - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	int use_emergency_buf = 0;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly */
	if (start_page_index & page_index_mask) {
		npages += start_page_index & page_index_mask;
		start_page_index &= ~page_index_mask;
	}

	pages_to_map = ALIGN(npages, page_index_alignment);

	if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
		return -EINVAL;

	size = sizeof(u64) * pages_to_map;
	size = min_t(int, PAGE_SIZE, size);
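	/*
	 * The update is chunked: at most one page worth of MTT entries
	 * (pages_iter of them) is filled in, DMA-mapped and posted per
	 * iteration of the loop below.
	 */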
	/* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
	 * code, when we are called from an invalidation. The pas buffer must
	 * be 2k-aligned for Connect-IB. */
	pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
	if (!pas) {
		mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
		pas = mlx5_ib_update_mtt_emergency_buffer;
		size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
		use_emergency_buf = 1;
		mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
		memset(pas, 0, size);
	}

	pages_iter = size / sizeof(u64);
	dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, start_page_index += pages_iter) {
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);

		npages = min_t(size_t,
			       pages_iter,
			       ib_umem_num_pages(umem) - start_page_index);

		__mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
				       start_page_index, npages, pas,
				       MLX5_IB_MTT_PRESENT);
		/* Clear padding after the pages brought from the
		 * umem. */
		memset(pas + npages, 0, size - npages * sizeof(u64));

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		memset(&wr, 0, sizeof(wr));
		wr.wr_id = (u64)(unsigned long)&umr_context;

		sg.length = ALIGN(npages * sizeof(u64),
				  MLX5_UMR_MTT_ALIGNMENT);
		sg.lkey = dev->umrc.pd->local_dma_lkey;

		wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
				MLX5_IB_SEND_UMR_UPDATE_MTT;
		wr.opcode = MLX5_IB_WR_UMR;
		umrwr->npages = sg.length / sizeof(u64);
		umrwr->page_shift = PAGE_SHIFT;
		umrwr->mkey = mr->mmr.key;
		umrwr->target.offset = start_page_index;

		mlx5_ib_init_umr_context(&umr_context);

		err = ib_post_send(umrc->qp, &wr, &bad);
		if (err) {
			mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);

		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_err(dev, "UMR completion failed, code %d\n",
				    umr_context.status);

	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

	if (!use_emergency_buf)
		free_page((unsigned long)pas);
	else
		mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
				     u64 length, struct ib_umem *umem,
				     int npages, int page_shift,
				     int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
	in = mlx5_vzalloc(inlen);

	mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,
			     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags
	 * in the page list submitted with the command. */
	in->flags = pg_cap ? cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0;
	in->seg.flags = convert_access(access_flags) |
		MLX5_ACCESS_MODE_MTT;
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
	in->seg.start_addr = cpu_to_be64(virt_addr);
	in->seg.len = cpu_to_be64(length);
	in->seg.bsfs_octo_size = 0;
	in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
	in->seg.log2_page_size = page_shift;
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
							 1 << page_shift));
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL,
				    NULL, NULL);
	if (err)
		mlx5_ib_warn(dev, "create mkey failed\n");

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);

	return ERR_PTR(err);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);
	umem = ib_umem_get(pd->uobject->context, start, length, access_flags,
			   0);
	if (IS_ERR(umem)) {
		mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
		return (void *)umem;
	}

	mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order);
	if (!npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    npages, ncont, order, page_shift);

	if (use_umr(order)) {
		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
			     order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d", order);
		}
	} else if (access_flags & IB_ACCESS_ON_DEMAND) {
		pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");

	mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift,
			access_flags);

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key);

	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization have finished before starting to
		 * handle invalidations.
		 */
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
	}
#endif

error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_send_wr wr, *bad;

	memset(&wr, 0, sizeof(wr));
	wr.wr_id = (u64)(unsigned long)&umr_context;
	prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);

	mlx5_ib_init_umr_context(&umr_context);

	err = ib_post_send(umrc->qp, &wr, &bad);
	mlx5_ib_dbg(dev, "err %d\n", err);

	wait_for_completion(&umr_context.done);

	if (umr_context.status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "unreg umr failed\n");
static int clean_mr(struct mlx5_ib_mr *mr)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	int umred = mr->umred;

	if (mlx5_core_destroy_psv(dev->mdev,
				  mr->sig->psv_memory.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
			     mr->sig->psv_memory.psv_idx);
	if (mlx5_core_destroy_psv(dev->mdev,
				  mr->sig->psv_wire.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
			     mr->sig->psv_wire.psv_idx);

	err = destroy_mkey(dev, mr);
	mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",

	err = unreg_umr(dev, mr);
	mlx5_ib_warn(dev, "failed unregister\n");

	free_cached_mr(dev, mr);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (umem && umem->odp_data) {
		/* Prevent new page faults from succeeding */
		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
					 ib_umem_end(umem));
		/*
		 * We kill the umem before the MR for ODP,
		 * so that there will not be any invalidations in
		 * flight, looking at the *mr struct.
		 */
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */
		umem = NULL;
	}
#endif

	if (umem) {
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
	}
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int access_mode, err;
	int ndescs = roundup(max_num_sg, 4);
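	/*
	 * The descriptor count is rounded up to a multiple of four; this is
	 * presumably an alignment requirement of the hardware translation
	 * table rather than something the verbs caller asked for.
	 */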
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);

	in->seg.status = MLX5_MKEY_STATUS_FREE;
	in->seg.xlt_oct_size = cpu_to_be32(ndescs);
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);

	if (mr_type == IB_MR_TYPE_MEM_REG) {
		access_mode = MLX5_ACCESS_MODE_MTT;
		in->seg.log2_page_size = PAGE_SHIFT;
	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
		in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
		in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,

		access_mode = MLX5_ACCESS_MODE_KLM;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	} else {
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);

	in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in),
	if (err)
		goto err_destroy_psv;

	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;

	if (mlx5_core_destroy_psv(dev->mdev,
				  mr->sig->psv_memory.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
			     mr->sig->psv_memory.psv_idx);
	if (mlx5_core_destroy_psv(dev->mdev,
				  mr->sig->psv_wire.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
			     mr->sig->psv_wire.psv_idx);

	return ERR_PTR(err);
struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl;
	int size = page_list_len * sizeof(u64);

	mfrpl = kmalloc(sizeof(*mfrpl), GFP_KERNEL);
	if (!mfrpl)
		return ERR_PTR(-ENOMEM);

	mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
	if (!mfrpl->ibfrpl.page_list)
		goto err_free;

	mfrpl->mapped_page_list = dma_alloc_coherent(ibdev->dma_device,
						     size, &mfrpl->map,
						     GFP_KERNEL);
	if (!mfrpl->mapped_page_list)
		goto err_free;

	WARN_ON(mfrpl->map & 0x3f);
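	/*
	 * The driver expects the DMA address of the mapped page list to be
	 * 64-byte aligned (hence the check on the low six bits);
	 * dma_alloc_coherent() normally provides at least that alignment.
	 */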
	return &mfrpl->ibfrpl;

err_free:
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
	return ERR_PTR(-ENOMEM);
}

void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
	struct mlx5_ib_dev *dev = to_mdev(page_list->device);
	int size = page_list->max_page_list_len * sizeof(u64);

	dma_free_coherent(&dev->mdev->pdev->dev, size, mfrpl->mapped_page_list,
			  mfrpl->map);
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
}
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		return -EINVAL;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			pr_err("signature status check requested on a non-signature enabled MR\n");
			return -EINVAL;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			return 0;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

	return 0;
}