/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <rdma/ib_umem.h>
#include <linux/atomic.h>

#include "iw_cxgb4.h"

static int use_dsgl = 1;
module_param(use_dsgl, int, 0644);
MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1)");

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
#define T4_ULPTX_MAX_DMA 1024
#define C4IW_INLINE_THRESHOLD 128

static int inline_threshold = C4IW_INLINE_THRESHOLD;
module_param(inline_threshold, int, 0644);
MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");
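
/*
 * Write a 32B-aligned, 32B-multiple chunk of adapter memory with a
 * single ULP_TX MEM_WRITE work request whose payload is a DSGL
 * pointing at already DMA-mapped host memory.  If 'wait' is set,
 * request a completion and block until the firmware replies.
 */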
static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
				       u32 len, dma_addr_t data, int wait)
{
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_sgl *sgl;
	u8 wr_len;
	int ret = 0;
	struct c4iw_wr_wait wr_wait;

	addr &= 0x7FFFFFF;

	if (wait)
		c4iw_init_wr_wait(&wr_wait);
	wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);

	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
	memset(req, 0, wr_len);
	INIT_ULPTX_WR(req, wr_len, 0, 0);
	req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
				    (wait ? FW_WR_COMPL(1) : 0));
	req->wr.wr_lo = wait ? (__force __be64)&wr_wait : 0;
	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
	req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
	req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
	req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(len >> 5));
	req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
	req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr));

	sgl = (struct ulptx_sgl *)(req + 1);
	sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE(1));
	sgl->len0 = cpu_to_be32(len);
	sgl->addr0 = cpu_to_be64(data);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		return ret;
	if (wait)
		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	return ret;
}
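
/*
 * Write adapter memory by embedding the payload in the work request
 * itself, at most C4IW_MAX_INLINE_SIZE (96) bytes per WQE.  Only the
 * last WQE asks for a completion, so a single wait covers the whole
 * ordered sequence.
 */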
static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
				  void *data)
{
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *sc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	struct c4iw_wr_wait wr_wait;
	__be32 cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));

	if (is_t4(rdev->lldi.adapter_type))
		cmd |= cpu_to_be32(ULP_MEMIO_ORDER(1));
	else
		cmd |= cpu_to_be32(V_T5_ULP_MEMIO_IMM(1));

	addr &= 0x7FFFFFF;
	PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(&wr_wait);
	for (i = 0; i < num_wqe; i++) {

		copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
			   len;
		wr_len = roundup(sizeof *req + sizeof *sc +
				 roundup(copy_len, T4_ULPTX_MIN_IO), 16);

		skb = alloc_skb(wr_len, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

		req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
		memset(req, 0, wr_len);
		INIT_ULPTX_WR(req, wr_len, 0, 0);

		if (i == (num_wqe - 1)) {
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
						    FW_WR_COMPL(1));
			req->wr.wr_lo = (__force __be64)(unsigned long)&wr_wait;
		} else
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
		req->wr.wr_mid = cpu_to_be32(
				       FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));

		req->cmd = cmd;
		req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
				DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len - sizeof(req->wr),
						      16));
		/* addr is in 32B units; each WQE covers 96B = 3 units */
		req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3));

		sc = (struct ulptx_idata *)(req + 1);
		sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM));
		sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(sc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		/* zero-pad the tail out to the 32B minimum I/O size */
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			       (copy_len % T4_ULPTX_MIN_IO));
		ret = c4iw_ofld_send(rdev, skb);
		if (ret)
			return ret;
		len -= C4IW_MAX_INLINE_SIZE;
	}

	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	return ret;
}
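
/*
 * DMA-map 'data' once and stream it to the adapter in chunks of up
 * to T4_ULPTX_MAX_DMA bytes, each rounded down to a multiple of the
 * 32B minimum I/O size.  A tail at or below the inline threshold is
 * written via the inline path instead.
 */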
int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
	u32 remain = len;
	u32 dmalen;
	int ret = 0;
	dma_addr_t daddr;
	dma_addr_t save;

	daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
		return -1;
	save = daddr;

	while (remain > inline_threshold) {
		if (remain < T4_ULPTX_MAX_DMA) {
			/* round the chunk down to a 32B multiple */
			if (remain & ~(T4_ULPTX_MIN_IO - 1))
				dmalen = remain & ~(T4_ULPTX_MIN_IO - 1);
			else
				dmalen = remain;
		} else
			dmalen = T4_ULPTX_MAX_DMA;
		remain -= dmalen;
		ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
						  !remain);
		if (ret)
			goto out;
		addr += dmalen >> 5;
		data += dmalen;
		daddr += dmalen;
	}
	if (remain)
		ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
out:
	dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
	return ret;
}

/*
 * Write len bytes of data into addr (32B aligned address).
 * If data is NULL, clear len bytes of memory to zero.
 */
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
			     void *data)
{
	if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
		if (len > inline_threshold) {
			if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
				printk_ratelimited(KERN_WARNING
						   "%s: dma map failure (non fatal)\n",
						   pci_name(rdev->lldi.pdev));
				return _c4iw_write_mem_inline(rdev, addr, len,
							      data);
			}
			return 0;
		}
		return _c4iw_write_mem_inline(rdev, addr, len, data);
	}
	return _c4iw_write_mem_inline(rdev, addr, len, data);
}

/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
	int err;
	struct fw_ri_tpte tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx)
			return -ENOMEM;
		/* each TPT entry occupies 32B of adapter stag memory */
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
	     __func__, stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
			V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
			V_FW_RI_TPTE_STAGSTATE(stag_state) |
			V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
		tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
			(bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
			V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
					       FW_RI_VA_BASED_TO))|
			V_FW_RI_TPTE_PS(page_size));
		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
			V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
				(rdev->lldi.vr->stag.start >> 5),
				sizeof(tpt), &tpt);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	return err;
}
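
/*
 * Write a physical buffer list into the adapter's PBL memory region.
 * pbl_addr is a byte address; adapter memory is addressed in 32B
 * units (hence >> 5) and each PBL entry is 8 bytes (hence << 3).
 */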
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size)
{
	int err;

	PDBG("%s *pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
	     __func__, pbl_addr, rdev->lldi.vr->pbl.start,
	     pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);

	return err;
}
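
/* Clear the TPT entry for a stag and return its index to the pool. */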
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
			       pbl_size, pbl_addr);
}

static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
			       0UL, 0, 0, 0, 0);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
			       0);
}

static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
			       0UL, 0, 0, pbl_size, pbl_addr);
}
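
/*
 * Finalize a registration: mark the MR valid, derive mmid and
 * rkey/lkey from the new stag, and insert the MR into the mmid idr
 * so completion processing can look it up.
 */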
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}

static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr);
	return ret;
}

static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			  struct c4iw_mr *mhp, int shift, int npages)
{
	u32 stag;
	int ret;

	if (npages > mhp->attr.pbl_size)
		return -ENOMEM;

	stag = mhp->attr.stag;
	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr);

	return ret;
}

static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
						npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}
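
/*
 * Validate a physical buffer array and flatten it into a big-endian
 * page list: interior buffers must be page aligned, the total length
 * must fit in 32 bits, and the page shift is the largest (up to 27)
 * that still covers every buffer boundary.
 */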
static int build_phys_page_list(struct ib_phys_buf *buffer_list,
				int num_phys_buf, u64 *iova_start,
				u64 *total_size, int *npages,
				int *shift, __be64 **page_list)
{
	u64 mask;
	int i, j, n;

	mask = 0;
	*total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
			return -EINVAL;
		if (i != 0 && i != num_phys_buf - 1 &&
		    (buffer_list[i].size & ~PAGE_MASK))
			return -EINVAL;
		*total_size += buffer_list[i].size;
		if (i > 0)
			mask |= buffer_list[i].addr;
		else
			mask |= buffer_list[i].addr & PAGE_MASK;
		if (i != num_phys_buf - 1)
			mask |= buffer_list[i].addr + buffer_list[i].size;
		else
			mask |= (buffer_list[i].addr + buffer_list[i].size +
				PAGE_SIZE - 1) & PAGE_MASK;
	}

	if (*total_size > 0xFFFFFFFFULL)
		return -ENOMEM;

	/* Find largest page shift we can use to cover buffers */
	for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
		if ((1ULL << *shift) & mask)
			break;

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
	buffer_list[0].addr &= ~0ull << *shift;

	*npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		*npages += (buffer_list[i].size +
			(1ULL << *shift) - 1) >> *shift;

	if (!*npages)
		return -EINVAL;

	*page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
	if (!*page_list)
		return -ENOMEM;

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
		     ++j)
			(*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
				((u64) j << *shift));

	PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
	     __func__, (unsigned long long)*iova_start,
	     (unsigned long long)mask, *shift, (unsigned long long)*total_size,
	     *npages);

	return 0;
}
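
/*
 * Re-register a physical MR.  The new attributes are staged in a
 * stack copy of the MR and written to the TPT first; the in-memory
 * MR is only updated once the adapter write has succeeded.
 */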
int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
			     struct ib_pd *pd, struct ib_phys_buf *buffer_list,
			     int num_phys_buf, int acc, u64 *iova_start)
{
	struct c4iw_mr mh, *mhp;
	struct c4iw_pd *php;
	struct c4iw_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages;
	int ret;

	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(mr);
	rhp = mhp->rhp;
	php = to_c4iw_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_c4iw_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
		mh.attr.perms = c4iw_ib_to_tpt_access(acc);
		mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
					 IB_ACCESS_MW_BIND;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = reregister_mem(rhp, php, &mh, shift, npages);
	kfree(page_list);
	if (ret)
		return ret;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}
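
/* Register a caller-supplied list of physical buffers as an MR. */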
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf, int acc, u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift,
				   &page_list);
	if (ret)
		goto err;

	ret = alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err;
	}

	ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
			npages);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}
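
/*
 * Allocate a DMA MR: a TPT entry with zero FBO and ~0 length, giving
 * the associated PD access to the whole address space.
 */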
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;
640 PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
650 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
654 mhp->attr.page_size = 0;
655 mhp->attr.len = ~0UL;
656 mhp->attr.pbl_size = 0;
658 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
659 FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
	if (ret)
		goto err1;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err2;
	return &mhp->ibmr;
err2:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err1:
	kfree(mhp);
	return ERR_PTR(ret);
}
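
/*
 * Register a user memory region: pin the pages with ib_umem_get(),
 * stream the PBL entries to adapter memory one page-sized batch at a
 * time, then write the TPT entry via register_mem().
 */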
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		n += chunk->nents;

	err = alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = write_pbl(&mhp->rhp->rdev,
					      pages,
					      mhp->attr.pbl_addr + (n << 3), i);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
				mhp->attr.pbl_addr + (n << 3), i);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}
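
/* Allocate a type-1 memory window: just a stag in the MW state. */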
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	deallocate_window(&rhp->rdev, mhp->attr.stag);
	kfree(mhp);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	return 0;
}
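
/*
 * Allocate a fastreg MR: a PBL sized for pbl_depth entries plus a
 * stag in the non-shared MR (NSMR) state, to be populated later by a
 * fast-register work request.
 */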
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, pbl_depth);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = pbl_depth;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
			    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err2:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err1:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}
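
/*
 * Fastreg page lists live in coherent DMA memory so the DSGL write
 * path can hand them to the hardware without an extra copy.
 */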
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
						     int page_list_len)
{
	struct c4iw_fr_page_list *c4pl;
	struct c4iw_dev *dev = to_c4iw_dev(device);
	dma_addr_t dma_addr;
	int pll_len = roundup(page_list_len * sizeof(u64), 32);

	c4pl = kmalloc(sizeof(*c4pl), GFP_KERNEL);
	if (!c4pl)
		return ERR_PTR(-ENOMEM);

	c4pl->ibpl.page_list = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev,
						  pll_len, &dma_addr,
						  GFP_KERNEL);
	if (!c4pl->ibpl.page_list) {
		kfree(c4pl);
		return ERR_PTR(-ENOMEM);
	}
	dma_unmap_addr_set(c4pl, mapping, dma_addr);
	c4pl->dma_addr = dma_addr;
	c4pl->dev = dev;
	c4pl->ibpl.max_page_list_len = pll_len;

	return &c4pl->ibpl;
}

void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
{
	struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);

	dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev,
			  c4pl->ibpl.max_page_list_len,
			  c4pl->ibpl.page_list, dma_unmap_addr(c4pl, mapping));
	kfree(c4pl);
}
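
/*
 * Tear down an MR: remove it from the mmid idr, clear its TPT entry,
 * then free the PBL, any kernel buffer, and any pinned user pages.
 */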
int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __func__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
				  mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}