/*
 * Copyright (c) 2009-2017 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * The original header names were stripped (likely by angle-bracket-eating
 * extraction); the four below are assumed from the upstream driver and
 * cover what this file uses: module parameters, userspace memory pinning,
 * and atomics.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <rdma/ib_umem.h>
#include <linux/atomic.h>

#include "iw_cxgb4.h"

int use_dsgl = 1;
module_param(use_dsgl, int, 0644);
MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1)");

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
#define T4_ULPTX_MAX_DMA 1024
#define C4IW_INLINE_THRESHOLD 128

static int inline_threshold = C4IW_INLINE_THRESHOLD;
module_param(inline_threshold, int, 0644);
MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");

static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
{
	return (is_t4(dev->rdev.lldi.adapter_type) ||
		is_t5(dev->rdev.lldi.adapter_type)) &&
	       length >= 8*1024*1024*1024ULL;
}

static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
				       u32 len, dma_addr_t data,
				       int wait, struct sk_buff *skb)
{
	struct ulp_mem_io *req;
	struct ulptx_sgl *sgl;
	u8 wr_len;
	int ret = 0;
	struct c4iw_wr_wait wr_wait;

	addr &= 0x7FFFFFF;

	if (wait)
		c4iw_init_wr_wait(&wr_wait);
	wr_len = roundup(sizeof *req + sizeof *sgl, 16);

	if (!skb) {
		skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
		if (!skb)
			return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, NCHAN);

	req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
	memset(req, 0, wr_len);
	INIT_ULPTX_WR(req, wr_len, 0, 0);
	req->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
			(wait ? F_FW_WR_COMPL : 0));
	req->wr.wr_lo = wait ? (__force __be64)(unsigned long)&wr_wait : 0;
	req->wr.wr_mid = cpu_to_be32(V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
	req->cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
			       V_T5_ULP_MEMIO_ORDER(1) |
			       V_T5_ULP_MEMIO_FID(rdev->lldi.rxq_ids[0]));
	req->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(len >> 5));
	req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
	req->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr));

	sgl = (struct ulptx_sgl *)(req + 1);
	sgl->cmd_nsge = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
				    V_ULPTX_NSGE(1));
	sgl->len0 = cpu_to_be32(len);
	sgl->addr0 = cpu_to_be64(data);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		return ret;
	if (wait)
		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	return ret;
}
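/*
 * Write a block of adapter memory using inline ULP_TX_MEM_WRITE work
 * requests.  The data is carried as immediate data, chunked into
 * C4IW_MAX_INLINE_SIZE (96-byte) pieces; each chunk advances the 32B-unit
 * destination address by 3.  Only the final work request asks for a
 * completion, which is then waited on.  A NULL data pointer zero-fills
 * the destination instead of copying.
 */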
static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
				  void *data, struct sk_buff *skb)
{
	struct ulp_mem_io *req;
	struct ulptx_idata *sc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	struct c4iw_wr_wait wr_wait;
	__be32 cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));

	if (is_t4(rdev->lldi.adapter_type))
		cmd |= cpu_to_be32(V_ULP_MEMIO_ORDER(1));
	else
		cmd |= cpu_to_be32(V_T5_ULP_MEMIO_IMM(1));

	addr &= 0x7FFFFFF;
	PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(&wr_wait);
	for (i = 0; i < num_wqe; i++) {

		copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
			   len;
		wr_len = roundup(sizeof *req + sizeof *sc +
				 roundup(copy_len, T4_ULPTX_MIN_IO), 16);

		if (!skb) {
			skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
			if (!skb)
				return -ENOMEM;
		}
		set_wr_txq(skb, CPL_PRIORITY_CONTROL, NCHAN);

		req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
		memset(req, 0, wr_len);
		INIT_ULPTX_WR(req, wr_len, 0, 0);

		if (i == (num_wqe-1)) {
			req->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
						    F_FW_WR_COMPL);
			req->wr.wr_lo = (__force __be64)(unsigned long)&wr_wait;
		} else
			req->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR));
		req->wr.wr_mid = cpu_to_be32(
				       V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));

		req->cmd = cmd;
		req->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(
				DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len - sizeof(req->wr),
						      16));
		req->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr + i * 3));

		sc = (struct ulptx_idata *)(req + 1);
		sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(sc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			       (copy_len % T4_ULPTX_MIN_IO));
		ret = c4iw_ofld_send(rdev, skb);
		skb = NULL;
		if (ret)
			return ret;
		len -= C4IW_MAX_INLINE_SIZE;
	}

	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	return ret;
}
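/*
 * Write a block of adapter memory via DMA (DSGL).  The buffer is mapped
 * once, then pushed in chunks of up to T4_ULPTX_MAX_DMA bytes; only the
 * chunk that drains the buffer requests a completion.  Any tail no larger
 * than inline_threshold falls through to the inline path.
 */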
int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data,
			struct sk_buff *skb)
{
	u32 remain = len;
	u32 dmalen;
	int ret = 0;
	dma_addr_t daddr;
	dma_addr_t save;

	daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
		return -1;
	save = daddr;

	while (remain > inline_threshold) {
		if (remain < T4_ULPTX_MAX_DMA) {
			if (remain & ~T4_ULPTX_MIN_IO)
				dmalen = remain & ~(T4_ULPTX_MIN_IO-1);
			else
				dmalen = remain;
		} else
			dmalen = T4_ULPTX_MAX_DMA;
		remain -= dmalen;
		ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
						  !remain, skb);
		if (ret)
			goto out;
		addr += dmalen >> 5;
		data += dmalen;
		daddr += dmalen;
	}
	if (remain)
		ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb);
out:
	dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
	return ret;
}

/*
 * write len bytes of data into addr (32B aligned address)
 * If data is NULL, clear len bytes of memory to zero.
 */
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
			     void *data, struct sk_buff *skb)
{
	if (rdev->lldi.ulptx_memwrite_dsgl && use_dsgl) {
		if (len > inline_threshold) {
			if (_c4iw_write_mem_dma(rdev, addr, len, data, skb)) {
				if (printk_ratelimit())
					printk(KERN_WARNING MOD
					       "%s: dma map failure (non fatal)\n",
					       pci_name(rdev->lldi.pdev));
				return _c4iw_write_mem_inline(rdev, addr, len,
							      data, skb);
			} else
				return 0;
		} else
			return _c4iw_write_mem_inline(rdev, addr, len, data,
						      skb);
	} else
		return _c4iw_write_mem_inline(rdev, addr, len, data, skb);
}

/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr,
			   struct sk_buff *skb)
{
	int err;
	struct fw_ri_tpte tpt;
	u32 stag_idx;
	static atomic_t key;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx) {
			mutex_lock(&rdev->stats.lock);
			rdev->stats.stag.fail++;
			mutex_unlock(&rdev->stats.lock);
			return -ENOMEM;
		}
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
	     __func__, stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
			V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
			V_FW_RI_TPTE_STAGSTATE(stag_state) |
			V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
		tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
			(bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
			V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
						      FW_RI_VA_BASED_TO)) |
			V_FW_RI_TPTE_PS(page_size));
		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
			V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr) >> 3));
		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
				(rdev->lldi.vr->stag.start >> 5),
				sizeof(tpt), &tpt, skb);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	return err;
}
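/*
 * Write pbl_size page-list entries (8 bytes each) into the adapter's PBL
 * memory.  pbl_addr is an adapter address; write_adapter_mem() takes its
 * destination in 32-byte units, hence the >> 5.
 */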
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size)
{
	int err;

	PDBG("%s *pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
	     __func__, pbl_addr, rdev->lldi.vr->pbl.start, pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL);
	return err;
}

static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr, struct sk_buff *skb)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
			       pbl_size, pbl_addr, skb);
}

static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
			       0UL, 0, 0, 0, 0, NULL);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag,
			     struct sk_buff *skb)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
			       0, skb);
}

static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
			       0UL, 0, 0, pbl_size, pbl_addr, NULL);
}

static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}
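/*
 * Write the TPT entry for a fully built non-shared memory region and make
 * it visible through the mmid table.  A zero-length MR is written with
 * length -1 and no access permissions.  On failure the TPT entry is torn
 * back down, consuming the preallocated dereg_skb.
 */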
static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.len ?
			      mhp->attr.perms : 0,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len ?
			      mhp->attr.len : -1, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret) {
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr, mhp->dereg_skb);
		mhp->dereg_skb = NULL;
	}
	return ret;
}

static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
						npages << 3);
	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}

struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
	if (!mhp->dereg_skb) {
		ret = -ENOMEM;
		goto err0;
	}

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
				   IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0ULL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0,
			      NULL);
	if (ret)
		goto err1;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err2;
	return &mhp->ibmr;
err2:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr, mhp->dereg_skb);
err1:
	kfree_skb(mhp->dereg_skb);
err0:
	kfree(mhp);
	return ERR_PTR(ret);
}
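/*
 * Try to build the PBL out of hugepage-sized (HPAGE_SIZE) entries instead
 * of one entry per system page.  The umem's DMA ranges must coalesce into
 * naturally aligned HPAGE_SIZE blocks; any discontinuity or misalignment
 * fails with -EINVAL and the caller falls back to the per-page PBL.  The
 * raw addresses are also copied out to the userspace buffer whose pointer
 * arrives in udata, presumably so libcxgb4 can mirror the page list.
 */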
static int try_huge_pbl(struct c4iw_dev *rhp, struct c4iw_mr *mhp, u64 start,
			u64 virt_addr, struct ib_udata *udata, int *shift)
{
#if defined(CONFIG_HUGETLB_PAGE) && !defined(__powerpc__) && !defined(__ia64__)
	__be64 *pages;
#ifdef HAVE_IB_UMEM_CHUNK
	struct ib_umem_chunk *chunk;
	int j = 0;
#else
	struct scatterlist *sg;
	int entry;
#endif
	unsigned dsize;
	dma_addr_t daddr;
	unsigned cur_size = 0;
	dma_addr_t uninitialized_var(cur_addr);
	int n;
	struct ib_umem *umem = mhp->umem;
	int err;
	int i;
	int off = start & (HPAGE_SIZE - 1);
	__u64 usr_pbl, *raw_pbl;
	__u64 __user *usr_pbl_ptr;
	int onepbl = 1;
	int s;

	n = DIV_ROUND_UP(off + umem->length, HPAGE_SIZE);
	err = alloc_pbl(mhp, n);
	if (err)
		goto err;

	raw_pbl = (__u64 *) __get_free_page(GFP_KERNEL);
	if (!raw_pbl) {
		err = -ENOMEM;
		goto err_pbl;
	}

	err = ib_copy_from_udata(&usr_pbl, udata, sizeof(u64));
	if (err)
		goto err_raw_pbl;
	usr_pbl_ptr = (__u64 __user *)(unsigned long)usr_pbl;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_raw_pbl;
	}

	i = n = 0;

#ifdef HAVE_IB_UMEM_CHUNK
	list_for_each_entry(chunk, &umem->chunk_list, list) {
		for (j = 0; j < chunk->nmap; ++j) {
			daddr = sg_dma_address(&chunk->page_list[j]);
			dsize = sg_dma_len(&chunk->page_list[j]);
#else
	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		daddr = sg_dma_address(sg);
		dsize = sg_dma_len(sg);
#endif
		if (!cur_size) {
			cur_addr = daddr;
			cur_size = dsize;
		} else if (cur_addr + cur_size != daddr) {
			err = -EINVAL;
			goto pbl_done;
		} else
			cur_size += dsize;
		if (cur_size > HPAGE_SIZE) {
			err = -EINVAL;
			goto pbl_done;
		} else if (cur_size == HPAGE_SIZE) {
			if (cur_addr & (HPAGE_SIZE - 1)) {
				err = -EINVAL;
				goto pbl_done;
			}
			cur_size = 0;
			onepbl = 0;
			raw_pbl[i] = cur_addr;
			pages[i++] = cpu_to_be64(cur_addr);
			if (i == PAGE_SIZE / sizeof *pages) {
				err = write_pbl(&mhp->rhp->rdev, pages,
						mhp->attr.pbl_addr + (n << 3),
						i);
				if (err)
					goto pbl_done;
				if (copy_to_user(usr_pbl_ptr + n, raw_pbl,
						 PAGE_SIZE)) {
					err = -EFAULT;
					goto pbl_done;
				}
				n += i;
				i = 0;
			}
		}
	}
#ifdef HAVE_IB_UMEM_CHUNK
	}
#endif

	if (onepbl) {
		s = 12;
		while ((1 << s) < cur_size)
			s++;
	} else
		s = ffs(HPAGE_SIZE) - 1;

	if (cur_size) {
		if (cur_addr & ((1 << s) - 1)) {
			err = -EINVAL;
			goto pbl_done;
		}
		raw_pbl[i] = cur_addr;
		pages[i++] = cpu_to_be64(cur_addr);
	}
	if (i) {
		err = write_pbl(&mhp->rhp->rdev, pages,
				mhp->attr.pbl_addr + (n << 3), i);
		if (!err && copy_to_user(usr_pbl_ptr + n, raw_pbl,
					 i * sizeof *usr_pbl_ptr))
			err = -EFAULT;
	}
pbl_done:
	free_page((unsigned long) pages);
	free_page((unsigned long) raw_pbl);
	if (err)
		goto err_pbl;

	*shift = s;
	PDBG("%s *shift %d\n", __func__, *shift);
	return 0;
err_raw_pbl:
	free_page((unsigned long)raw_pbl);
err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err:
	return err;
#else
	return -ENOSYS;
#endif
}
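/*
 * Register a user memory region.  If the umem is hugetlb-backed and the
 * library is new enough to pass a PBL buffer in udata, try_huge_pbl() is
 * attempted first; otherwise (or if it fails) the PBL is built one entry
 * per system page.  The resulting page-size shift is reported back to
 * userspace in c4iw_reg_mr_resp.
 */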
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift = 0, n, len;
#ifdef HAVE_IB_UMEM_CHUNK
	int i, j, k;
	struct ib_umem_chunk *chunk;
#else
	int i, k, entry;
	struct scatterlist *sg;
#endif
	int err = 0;
	struct c4iw_reg_mr_resp uresp;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	__u64 usr_pbl, *raw_pbl;
	__u64 __user *usr_pbl_ptr;
	int npages;
	int oldlib = 0;
	static int warned;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (udata->outlen < sizeof uresp) {
		oldlib = 1;
		if (!warned++)
			printk(KERN_WARNING MOD "Warning - downlevel libcxgb4 "
			       "(non-fatal), hugepage PBLs disabled.\n");
	}

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_exceeds_hw_limits(rhp, length))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
	if (!mhp->dereg_skb) {
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree_skb(mhp->dereg_skb);
		kfree(mhp);
		return ERR_PTR(err);
	}

	if (oldlib || !mhp->umem->hugetlb ||
	    try_huge_pbl(rhp, mhp, start, virt, udata, &shift)) {
		shift = ffs(mhp->umem->page_size) - 1;

#ifdef HAVE_IB_UMEM_CHUNK
		n = 0;
		list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
			n += chunk->nents;
#else
		n = mhp->umem->nmap;
#endif
		err = alloc_pbl(mhp, n);
		if (err)
			goto err;

		raw_pbl = (__u64 *) __get_free_page(GFP_KERNEL);
		if (!raw_pbl) {
			err = -ENOMEM;
			goto err_pbl;
		}

		err = ib_copy_from_udata(&usr_pbl, udata, sizeof(u64));
		if (err)
			goto err_raw_pbl;
		usr_pbl_ptr = (__u64 __user *)(unsigned long)usr_pbl;

		pages = (__be64 *) __get_free_page(GFP_KERNEL);
		if (!pages) {
			err = -ENOMEM;
			goto err_raw_pbl;
		}

		npages = n;
		i = n = 0;

#ifdef HAVE_IB_UMEM_CHUNK
		list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
			for (j = 0; j < chunk->nmap; ++j) {
				len = sg_dma_len(&chunk->page_list[j]) >> shift;
				for (k = 0; k < len; ++k) {
					u64 pa = sg_dma_address(
							&chunk->page_list[j]) +
						 mhp->umem->page_size * k;
#else
		for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap,
			    entry) {
			len = sg_dma_len(sg) >> shift;
			for (k = 0; k < len; ++k) {
				u64 pa = sg_dma_address(sg) +
					 mhp->umem->page_size * k;
#endif
				raw_pbl[i] = pa;
				pages[i++] = cpu_to_be64(pa);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = write_pbl(&mhp->rhp->rdev, pages,
							mhp->attr.pbl_addr +
							(n << 3), i);
					if (err)
						goto pbl_done;
					if (copy_to_user(usr_pbl_ptr + n,
							 raw_pbl, PAGE_SIZE)) {
						err = -EFAULT;
						goto pbl_done;
					}
					n += i;
					i = 0;
				}
			}
		}

		if (i) {
			err = write_pbl(&mhp->rhp->rdev, pages,
					mhp->attr.pbl_addr + (n << 3), i);
			if (!err && copy_to_user(usr_pbl_ptr + n, raw_pbl,
						 i * sizeof *usr_pbl_ptr))
				err = -EFAULT;
		}

pbl_done:
		free_page((unsigned long) pages);
		free_page((unsigned long) raw_pbl);
		if (err)
			goto err_pbl;
	}

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (!oldlib) {
		uresp.page_size = 1 << shift;
		err = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (err)
			goto err_pbl;
	}
	return &mhp->ibmr;

err_raw_pbl:
	free_page((unsigned long)raw_pbl);
err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err:
	kfree_skb(mhp->dereg_skb);
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			    struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
	if (!mhp->dereg_skb) {
		ret = -ENOMEM;
		goto err0;
	}

	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret)
		goto err1;

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err2;
	}
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);

err2:
	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
err1:
	if (mhp->dereg_skb)
		kfree_skb(mhp->dereg_skb);
err0:
	kfree(mhp);
	return ERR_PTR(ret);
}

int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
	if (mhp->dereg_skb)
		kfree_skb(mhp->dereg_skb);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	kfree(mhp);
	return 0;
}
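/*
 * Allocate a fast-register MR: a DMA-coherent page-list buffer for
 * ib_map_mr_sg(), an on-adapter PBL, and a stag written in the invalid
 * state (mhp->attr.state tracks validity; the stag is only made valid
 * when a fast-register work request uses the MR).
 */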
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			    u32 max_num_sg)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;
	int length = roundup(max_num_sg * sizeof(u64), 32);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
					 use_dsgl))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev,
				      length, &mhp->mpl_addr, GFP_KERNEL);
	if (!mhp->mpl) {
		ret = -ENOMEM;
		goto err_mpl;
	}
	mhp->max_mpl_len = length;

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, max_num_sg);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = max_num_sg;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
			    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 0;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr, mhp->dereg_skb);
err2:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err1:
	dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
			  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
err_mpl:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
		return -ENOMEM;

	mhp->mpl[mhp->mpl_len++] = addr;

	return 0;
}

int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
#ifdef IWARP_HAVE_SG_OFFSET
		   int sg_nents, unsigned int *sg_offset)
#else
		   int sg_nents)
#endif
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	mhp->mpl_len = 0;

#ifdef IWARP_HAVE_SG_OFFSET
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
#else
	return ib_sg_to_pages(ibmr, sg, sg_nents, c4iw_set_page);
#endif
}

int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __func__, ib_mr);

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->mpl)
		dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
				  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr, mhp->dereg_skb);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
				  mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}

void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
{
	struct c4iw_mr *mhp;
	unsigned long flags;

	spin_lock_irqsave(&rhp->lock, flags);
	mhp = get_mhp(rhp, rkey >> 8);
	if (mhp)
		mhp->attr.state = 0;
	spin_unlock_irqrestore(&rhp->lock, flags);
}