Lines matching refs: buf_desc (struct smc_buf_desc references in the SMC, Shared Memory Communications, buffer management code)
53 struct smc_buf_desc *buf_desc);
1094 static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb, in smcr_buf_unuse() argument
1100 if (is_rmb && buf_desc->is_conf_rkey && !list_empty(&lgr->list)) { in smcr_buf_unuse()
1106 smc_llc_do_delete_rkey(lgr, buf_desc); in smcr_buf_unuse()
1107 buf_desc->is_conf_rkey = false; in smcr_buf_unuse()
1113 if (buf_desc->is_reg_err) { in smcr_buf_unuse()
1118 list_del(&buf_desc->list); in smcr_buf_unuse()
1121 smc_buf_free(lgr, is_rmb, buf_desc); in smcr_buf_unuse()
1123 buf_desc->used = 0; in smcr_buf_unuse()
1124 memset(buf_desc->cpu_addr, 0, buf_desc->len); in smcr_buf_unuse()
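
The matches at 1094-1124 all fall inside smcr_buf_unuse(), the path taken when a connection stops using an SMC-R buffer. Read together they suggest: if the peer holds a confirmed rkey for this RMB, revoke it via LLC first; a buffer whose registration failed is unlinked and freed; otherwise it is wiped and parked for reuse. A minimal reconstruction from those lines only, assuming the private net/smc headers and omitting the locking around the LLC flow and the list operation:

        /* Simplified reconstruction from the matched lines; locking omitted. */
        static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
                                   struct smc_link_group *lgr)
        {
                if (is_rmb && buf_desc->is_conf_rkey && !list_empty(&lgr->list)) {
                        /* the peer was told this rkey: revoke it via LLC */
                        smc_llc_do_delete_rkey(lgr, buf_desc);
                        buf_desc->is_conf_rkey = false;
                }

                if (buf_desc->is_reg_err) {
                        /* registration failed on some link: do not recycle */
                        list_del(&buf_desc->list);
                        smc_buf_free(lgr, is_rmb, buf_desc);
                } else {
                        /* park the buffer so a later connection can reuse it */
                        buf_desc->used = 0;
                        memset(buf_desc->cpu_addr, 0, buf_desc->len);
                }
        }
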
1193 static void smcr_buf_unmap_link(struct smc_buf_desc *buf_desc, bool is_rmb, in smcr_buf_unmap_link() argument
1196 if (is_rmb || buf_desc->is_vm) in smcr_buf_unmap_link()
1197 buf_desc->is_reg_mr[lnk->link_idx] = false; in smcr_buf_unmap_link()
1198 if (!buf_desc->is_map_ib[lnk->link_idx]) in smcr_buf_unmap_link()
1201 if ((is_rmb || buf_desc->is_vm) && in smcr_buf_unmap_link()
1202 buf_desc->mr[lnk->link_idx]) { in smcr_buf_unmap_link()
1203 smc_ib_put_memory_region(buf_desc->mr[lnk->link_idx]); in smcr_buf_unmap_link()
1204 buf_desc->mr[lnk->link_idx] = NULL; in smcr_buf_unmap_link()
1207 smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE); in smcr_buf_unmap_link()
1209 smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE); in smcr_buf_unmap_link()
1211 sg_free_table(&buf_desc->sgt[lnk->link_idx]); in smcr_buf_unmap_link()
1212 buf_desc->is_map_ib[lnk->link_idx] = false; in smcr_buf_unmap_link()
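
The 1193-1212 matches are smcr_buf_unmap_link(), the per-link side of the teardown: drop the IB memory region (only RMBs and virtually contiguous buffers have one), unmap the scatterlist in the DMA direction that matches the buffer's role, and release the sg table. A condensed sketch, again assuming the net/smc declarations:

        /* Per-link teardown, condensed from the matched lines. */
        static void smcr_buf_unmap_link(struct smc_buf_desc *buf_desc, bool is_rmb,
                                        struct smc_link *lnk)
        {
                if (is_rmb || buf_desc->is_vm)
                        buf_desc->is_reg_mr[lnk->link_idx] = false;
                if (!buf_desc->is_map_ib[lnk->link_idx])
                        return;         /* never mapped on this link */

                if ((is_rmb || buf_desc->is_vm) && buf_desc->mr[lnk->link_idx]) {
                        smc_ib_put_memory_region(buf_desc->mr[lnk->link_idx]);
                        buf_desc->mr[lnk->link_idx] = NULL;
                }

                /* RMBs are written by the peer, sndbufs are read by the HCA */
                if (is_rmb)
                        smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE);
                else
                        smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE);

                sg_free_table(&buf_desc->sgt[lnk->link_idx]);
                buf_desc->is_map_ib[lnk->link_idx] = false;
        }
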
1219 struct smc_buf_desc *buf_desc, *bf; in smcr_buf_unmap_lgr() local
1224 list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) in smcr_buf_unmap_lgr()
1225 smcr_buf_unmap_link(buf_desc, true, lnk); in smcr_buf_unmap_lgr()
1228 list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i], in smcr_buf_unmap_lgr()
1230 smcr_buf_unmap_link(buf_desc, false, lnk); in smcr_buf_unmap_lgr()
1292 struct smc_buf_desc *buf_desc) in smcr_buf_free() argument
1297 smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]); in smcr_buf_free()
1299 if (!buf_desc->is_vm && buf_desc->pages) in smcr_buf_free()
1300 __free_pages(buf_desc->pages, buf_desc->order); in smcr_buf_free()
1301 else if (buf_desc->is_vm && buf_desc->cpu_addr) in smcr_buf_free()
1302 vfree(buf_desc->cpu_addr); in smcr_buf_free()
1303 kfree(buf_desc); in smcr_buf_free()
1307 struct smc_buf_desc *buf_desc) in smcd_buf_free() argument
1311 buf_desc->len += sizeof(struct smcd_cdc_msg); in smcd_buf_free()
1312 smc_ism_unregister_dmb(lgr->smcd, buf_desc); in smcd_buf_free()
1314 kfree(buf_desc->cpu_addr); in smcd_buf_free()
1316 kfree(buf_desc); in smcd_buf_free()
1320 struct smc_buf_desc *buf_desc) in smc_buf_free() argument
1323 smcd_buf_free(lgr, is_rmb, buf_desc); in smc_buf_free()
1325 smcr_buf_free(lgr, is_rmb, buf_desc); in smc_buf_free()
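
Lines 1292-1325 cover the three free routines: smcr_buf_free() unmaps the buffer from every link and then releases either the page allocation or the vmalloc area; smcd_buf_free() gives a DMB back to the ISM device (re-adding the CDC header it had subtracted from ->len) or frees a plain sndbuf; smc_buf_free() dispatches between them. The branch conditions and the link-count constant below are inferred from context rather than shown in the matches:

        /* Free paths, compressed from the matched lines.  The conditions
         * (lgr->is_smcd, is_rmb) and SMC_LINKS_PER_LGR_MAX are inferred
         * from context, not visible in the matches themselves.
         */
        static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
                                  struct smc_buf_desc *buf_desc)
        {
                int i;

                for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
                        smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]);

                if (!buf_desc->is_vm && buf_desc->pages)
                        __free_pages(buf_desc->pages, buf_desc->order);
                else if (buf_desc->is_vm && buf_desc->cpu_addr)
                        vfree(buf_desc->cpu_addr);
                kfree(buf_desc);
        }

        static void smcd_buf_free(struct smc_link_group *lgr, bool is_rmb,
                                  struct smc_buf_desc *buf_desc)
        {
                if (is_rmb) {
                        /* undo the CDC-header adjustment before unregistering */
                        buf_desc->len += sizeof(struct smcd_cdc_msg);
                        smc_ism_unregister_dmb(lgr->smcd, buf_desc);
                } else {
                        kfree(buf_desc->cpu_addr);
                }
                kfree(buf_desc);
        }

        static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
                                 struct smc_buf_desc *buf_desc)
        {
                if (lgr->is_smcd)
                        smcd_buf_free(lgr, is_rmb, buf_desc);
                else
                        smcr_buf_free(lgr, is_rmb, buf_desc);
        }
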
1330 struct smc_buf_desc *buf_desc, *bf_desc; in __smc_lgr_free_bufs() local
1339 list_for_each_entry_safe(buf_desc, bf_desc, buf_list, in __smc_lgr_free_bufs()
1341 list_del(&buf_desc->list); in __smc_lgr_free_bufs()
1342 smc_buf_free(lgr, is_rmb, buf_desc); in __smc_lgr_free_bufs()
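
The __smc_lgr_free_bufs() matches (1330-1342) show the final cleanup when a link group is dismantled: each per-size buffer list is walked with the _safe iterator, since entries are deleted while walking, and every descriptor is freed. Below is one such inner loop pulled out into a hypothetical helper; the name __smc_lgr_free_buf_list is invented here, the real function iterates the rmbs[]/sndbufs[] arrays inline:

        /* Illustrative helper (name invented); the real code runs this loop
         * inline for every size class of both rmbs[] and sndbufs[].
         */
        static void __smc_lgr_free_buf_list(struct smc_link_group *lgr, bool is_rmb,
                                            struct list_head *buf_list)
        {
                struct smc_buf_desc *buf_desc, *bf_desc;

                list_for_each_entry_safe(buf_desc, bf_desc, buf_list, list) {
                        list_del(&buf_desc->list);
                        smc_buf_free(lgr, is_rmb, buf_desc);
                }
        }
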
2015 static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb, in smcr_buf_map_link() argument
2022 if (buf_desc->is_map_ib[lnk->link_idx]) in smcr_buf_map_link()
2025 if (buf_desc->is_vm) { in smcr_buf_map_link()
2026 buf = buf_desc->cpu_addr; in smcr_buf_map_link()
2027 buf_size = buf_desc->len; in smcr_buf_map_link()
2028 offset = offset_in_page(buf_desc->cpu_addr); in smcr_buf_map_link()
2034 rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], nents, GFP_KERNEL); in smcr_buf_map_link()
2038 if (buf_desc->is_vm) { in smcr_buf_map_link()
2040 for_each_sg(buf_desc->sgt[lnk->link_idx].sgl, sg, nents, i) { in smcr_buf_map_link()
2049 sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl, in smcr_buf_map_link()
2050 buf_desc->cpu_addr, buf_desc->len); in smcr_buf_map_link()
2054 rc = smc_ib_buf_map_sg(lnk, buf_desc, in smcr_buf_map_link()
2062 buf_desc->is_dma_need_sync |= in smcr_buf_map_link()
2063 smc_ib_is_sg_need_sync(lnk, buf_desc) << lnk->link_idx; in smcr_buf_map_link()
2065 if (is_rmb || buf_desc->is_vm) { in smcr_buf_map_link()
2072 buf_desc, lnk->link_idx); in smcr_buf_map_link()
2075 smc_ib_sync_sg_for_device(lnk, buf_desc, in smcr_buf_map_link()
2078 buf_desc->is_map_ib[lnk->link_idx] = true; in smcr_buf_map_link()
2082 smc_ib_buf_unmap_sg(lnk, buf_desc, in smcr_buf_map_link()
2085 sg_free_table(&buf_desc->sgt[lnk->link_idx]); in smcr_buf_map_link()
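
The long 2015-2085 block is smcr_buf_map_link(), which makes a buffer usable for RDMA on one link: build a scatterlist (one entry per page for vzalloc'ed buffers, a single entry for physically contiguous ones), DMA-map it, record whether the mapping needs explicit CPU/device syncs, and, for peer-writable buffers, obtain an IB memory region and sync it for the device. The sketch below stitches those lines together; the per-page fill loop, the mapped-entry check, and the memory-region helper's name and arguments are only partly visible in the matches and should be read as assumptions:

        /* Skeleton of smcr_buf_map_link(); parts not visible in the matches
         * are summarized in comments or marked as assumptions.
         */
        static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb,
                                     struct smc_link *lnk)
        {
                int nents, rc;

                if (buf_desc->is_map_ib[lnk->link_idx])
                        return 0;               /* already mapped on this link */

                /* vmalloc'ed buffers need one sg entry per backing page,
                 * physically contiguous buffers need exactly one
                 */
                nents = buf_desc->is_vm ?
                        PAGE_ALIGN(buf_desc->len +
                                   offset_in_page(buf_desc->cpu_addr)) >> PAGE_SHIFT :
                        1;

                rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], nents, GFP_KERNEL);
                if (rc)
                        return rc;

                if (buf_desc->is_vm) {
                        /* walk the vmalloc area and set one sg entry per page;
                         * only the for_each_sg() walk is visible in the matches,
                         * so the loop body is not reproduced here
                         */
                } else {
                        sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl,
                                   buf_desc->cpu_addr, buf_desc->len);
                }

                /* map for DMA in the direction matching the buffer's role */
                rc = smc_ib_buf_map_sg(lnk, buf_desc,
                                       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
                if (rc != nents) {              /* partial or failed mapping */
                        rc = -EAGAIN;           /* assumed error code */
                        goto free_table;
                }

                /* remember whether this mapping needs explicit sync calls */
                buf_desc->is_dma_need_sync |=
                        smc_ib_is_sg_need_sync(lnk, buf_desc) << lnk->link_idx;

                if (is_rmb || buf_desc->is_vm) {
                        /* peer-writable buffers need an IB memory region; the
                         * helper name is inferred from smc_ib_put_memory_region()
                         * in the unmap path, its leading arguments are assumed
                         */
                        rc = smc_ib_get_memory_region(lnk->roce_pd,
                                                      IB_ACCESS_REMOTE_WRITE |
                                                      IB_ACCESS_LOCAL_WRITE,
                                                      buf_desc, lnk->link_idx);
                        if (rc)
                                goto buf_unmap;
                        smc_ib_sync_sg_for_device(lnk, buf_desc, DMA_FROM_DEVICE);
                }
                buf_desc->is_map_ib[lnk->link_idx] = true;
                return 0;

        buf_unmap:
                smc_ib_buf_unmap_sg(lnk, buf_desc,
                                    is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
        free_table:
                sg_free_table(&buf_desc->sgt[lnk->link_idx]);
                return rc;
        }
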
2092 int smcr_link_reg_buf(struct smc_link *link, struct smc_buf_desc *buf_desc) in smcr_link_reg_buf() argument
2096 if (!buf_desc->is_reg_mr[link->link_idx]) { in smcr_link_reg_buf()
2098 if (buf_desc->is_vm) in smcr_link_reg_buf()
2099 buf_desc->mr[link->link_idx]->iova = in smcr_link_reg_buf()
2100 (uintptr_t)buf_desc->cpu_addr; in smcr_link_reg_buf()
2101 if (smc_wr_reg_send(link, buf_desc->mr[link->link_idx])) { in smcr_link_reg_buf()
2102 buf_desc->is_reg_err = true; in smcr_link_reg_buf()
2105 buf_desc->is_reg_mr[link->link_idx] = true; in smcr_link_reg_buf()
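
2092-2105 is smcr_link_reg_buf(), the registration step proper: once per link, the buffer's memory region is announced to the peer via a registration work request; virtually contiguous buffers first get the MR's iova pointed at their kernel virtual address. On failure is_reg_err is set, which is what makes smcr_buf_unuse() free rather than recycle the buffer. Reconstructed almost verbatim from the matches; the -EFAULT return is an assumption:

        /* Reconstructed from the matches; the error return value is assumed. */
        int smcr_link_reg_buf(struct smc_link *link, struct smc_buf_desc *buf_desc)
        {
                if (!buf_desc->is_reg_mr[link->link_idx]) {
                        /* register memory region for new buf */
                        if (buf_desc->is_vm)
                                buf_desc->mr[link->link_idx]->iova =
                                        (uintptr_t)buf_desc->cpu_addr;
                        if (smc_wr_reg_send(link, buf_desc->mr[link->link_idx])) {
                                buf_desc->is_reg_err = true;
                                return -EFAULT;
                        }
                        buf_desc->is_reg_mr[link->link_idx] = true;
                }
                return 0;
        }
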
2113 struct smc_buf_desc *buf_desc, *bf; in _smcr_buf_map_lgr() local
2117 list_for_each_entry_safe(buf_desc, bf, lst, list) { in _smcr_buf_map_lgr()
2118 if (!buf_desc->used) in _smcr_buf_map_lgr()
2120 rc = smcr_buf_map_link(buf_desc, is_rmb, lnk); in _smcr_buf_map_lgr()
2154 struct smc_buf_desc *buf_desc, *bf; in smcr_buf_reg_lgr() local
2160 list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) { in smcr_buf_reg_lgr()
2161 if (!buf_desc->used) in smcr_buf_reg_lgr()
2163 rc = smcr_link_reg_buf(lnk, buf_desc); in smcr_buf_reg_lgr()
2178 list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i], list) { in smcr_buf_reg_lgr()
2179 if (!buf_desc->used || !buf_desc->is_vm) in smcr_buf_reg_lgr()
2181 rc = smcr_link_reg_buf(lnk, buf_desc); in smcr_buf_reg_lgr()
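
2113-2181 are the two walk-all-buffers helpers used when a link is added to the group: _smcr_buf_map_lgr() maps every in-use buffer of one list onto the new link, and smcr_buf_reg_lgr() then registers all in-use RMBs on it plus, per the filter at 2179, only the virtually contiguous sndbufs. A sketch of the first helper; the outer loops over the size classes follow the same pattern, and list locking is omitted:

        /* Map all in-use buffers of one list onto a (new) link; condensed
         * from the matches, list locking omitted.
         */
        static int _smcr_buf_map_lgr(struct smc_link *lnk, struct list_head *lst,
                                     bool is_rmb)
        {
                struct smc_buf_desc *buf_desc, *bf;
                int rc = 0;

                list_for_each_entry_safe(buf_desc, bf, lst, list) {
                        if (!buf_desc->used)
                                continue;       /* free slot, nothing to map yet */
                        rc = smcr_buf_map_link(buf_desc, is_rmb, lnk);
                        if (rc)
                                break;
                }
                return rc;
        }
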
2195 struct smc_buf_desc *buf_desc; in smcr_new_buf_create() local
2198 buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL); in smcr_new_buf_create()
2199 if (!buf_desc) in smcr_new_buf_create()
2205 buf_desc->order = get_order(bufsize); in smcr_new_buf_create()
2206 buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN | in smcr_new_buf_create()
2209 buf_desc->order); in smcr_new_buf_create()
2210 if (buf_desc->pages) { in smcr_new_buf_create()
2211 buf_desc->cpu_addr = in smcr_new_buf_create()
2212 (void *)page_address(buf_desc->pages); in smcr_new_buf_create()
2213 buf_desc->len = bufsize; in smcr_new_buf_create()
2214 buf_desc->is_vm = false; in smcr_new_buf_create()
2221 buf_desc->order = get_order(bufsize); in smcr_new_buf_create()
2222 buf_desc->cpu_addr = vzalloc(PAGE_SIZE << buf_desc->order); in smcr_new_buf_create()
2223 if (!buf_desc->cpu_addr) in smcr_new_buf_create()
2225 buf_desc->pages = NULL; in smcr_new_buf_create()
2226 buf_desc->len = bufsize; in smcr_new_buf_create()
2227 buf_desc->is_vm = true; in smcr_new_buf_create()
2230 return buf_desc; in smcr_new_buf_create()
2233 kfree(buf_desc); in smcr_new_buf_create()
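
2195-2233 is smcr_new_buf_create(): allocate a descriptor, try physically contiguous pages first, and fall back to a vzalloc'ed, virtually contiguous buffer. The sketch below keeps that order; the full GFP flag set, the error codes and any policy that restricts one of the two branches are not fully visible in the matches and are marked as assumptions in the comments:

        /* Condensed sketch of smcr_new_buf_create().  Only GFP_KERNEL and
         * __GFP_NOWARN are visible in the matches; the remaining flags,
         * the error codes and any selection logic between the two
         * branches are assumptions.
         */
        static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
                                                        bool is_rmb, int bufsize)
        {
                struct smc_buf_desc *buf_desc;

                buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
                if (!buf_desc)
                        return ERR_PTR(-ENOMEM);

                /* first choice: physically contiguous pages */
                buf_desc->order = get_order(bufsize);
                buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
                                              __GFP_NORETRY | __GFP_ZERO,
                                              buf_desc->order);
                if (buf_desc->pages) {
                        buf_desc->cpu_addr =
                                (void *)page_address(buf_desc->pages);
                        buf_desc->len = bufsize;
                        buf_desc->is_vm = false;
                        return buf_desc;
                }

                /* fallback: virtually contiguous buffer */
                buf_desc->cpu_addr = vzalloc(PAGE_SIZE << buf_desc->order);
                if (!buf_desc->cpu_addr) {
                        kfree(buf_desc);
                        return ERR_PTR(-EAGAIN);        /* assumed error code */
                }
                buf_desc->pages = NULL;
                buf_desc->len = bufsize;
                buf_desc->is_vm = true;
                return buf_desc;
        }
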
2241 struct smc_buf_desc *buf_desc, bool is_rmb) in smcr_buf_map_usable_links() argument
2252 if (smcr_buf_map_link(buf_desc, is_rmb, lnk)) { in smcr_buf_map_usable_links()
2268 struct smc_buf_desc *buf_desc; in smcd_new_buf_create() local
2272 buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL); in smcd_new_buf_create()
2273 if (!buf_desc) in smcd_new_buf_create()
2276 rc = smc_ism_register_dmb(lgr, bufsize, buf_desc); in smcd_new_buf_create()
2278 kfree(buf_desc); in smcd_new_buf_create()
2285 buf_desc->pages = virt_to_page(buf_desc->cpu_addr); in smcd_new_buf_create()
2287 buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg); in smcd_new_buf_create()
2289 buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL | in smcd_new_buf_create()
2292 if (!buf_desc->cpu_addr) { in smcd_new_buf_create()
2293 kfree(buf_desc); in smcd_new_buf_create()
2296 buf_desc->len = bufsize; in smcd_new_buf_create()
2298 return buf_desc; in smcd_new_buf_create()
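
2268-2298 is the SMC-D counterpart, smcd_new_buf_create(): an RMB is a DMB registered with the ISM device, with the usable length reported minus the in-buffer CDC header (the counterpart of the += seen in smcd_buf_free()), while a sndbuf is ordinary kernel memory. A condensed sketch; the error codes and the GFP flags truncated in the matches are assumptions:

        /* Condensed sketch of smcd_new_buf_create(); error codes assumed. */
        static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
                                                        bool is_rmb, int bufsize)
        {
                struct smc_buf_desc *buf_desc;
                int rc;

                buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
                if (!buf_desc)
                        return ERR_PTR(-ENOMEM);

                if (is_rmb) {
                        /* RMBs are DMBs registered with the ISM device */
                        rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
                        if (rc) {
                                kfree(buf_desc);
                                return ERR_PTR(rc);
                        }
                        buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
                        /* the CDC header lives inside the DMB, so report the
                         * usable length without it
                         */
                        buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
                } else {
                        /* sndbufs are plain kernel memory; further __GFP_*
                         * flags are truncated in the match at 2289
                         */
                        buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
                                                     __GFP_NOWARN);
                        if (!buf_desc->cpu_addr) {
                                kfree(buf_desc);
                                return ERR_PTR(-ENOMEM);
                        }
                        buf_desc->len = bufsize;
                }
                return buf_desc;
        }
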
2303 struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM); in __smc_buf_create() local
2331 buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list); in __smc_buf_create()
2332 if (buf_desc) { in __smc_buf_create()
2333 buf_desc->is_dma_need_sync = 0; in __smc_buf_create()
2340 buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize); in __smc_buf_create()
2342 buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize); in __smc_buf_create()
2344 if (PTR_ERR(buf_desc) == -ENOMEM) in __smc_buf_create()
2346 if (IS_ERR(buf_desc)) { in __smc_buf_create()
2356 buf_desc->used = 1; in __smc_buf_create()
2358 list_add(&buf_desc->list, buf_list); in __smc_buf_create()
2363 if (IS_ERR(buf_desc)) in __smc_buf_create()
2364 return PTR_ERR(buf_desc); in __smc_buf_create()
2367 if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) { in __smc_buf_create()
2368 smcr_buf_unuse(buf_desc, is_rmb, lgr); in __smc_buf_create()
2374 conn->rmb_desc = buf_desc; in __smc_buf_create()
2379 smc_rmb_wnd_update_limit(buf_desc->len); in __smc_buf_create()
2383 conn->sndbuf_desc = buf_desc; in __smc_buf_create()
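
The remaining matches (2303-2383) are __smc_buf_create(), which ties everything together: try to recycle an unused descriptor of the right size class, otherwise create a new one (SMC-D or SMC-R), publish it on the link group's list, map it on every usable link, and finally attach it to the connection as RMB or sndbuf. The real function negotiates the buffer size downwards in a loop and takes per-list locks; the schematic below collapses that into a hypothetical helper for one candidate size (the name, signature and rw_semaphore lock type are invented for illustration):

        /* Hypothetical helper modelling one pass of __smc_buf_create();
         * the size negotiation, locking and statistics are elided.
         */
        static int smc_buf_create_one(struct smc_connection *conn,
                                      struct smc_link_group *lgr,
                                      bool is_smcd, bool is_rmb,
                                      int bufsize, int bufsize_short,
                                      struct rw_semaphore *lock,
                                      struct list_head *buf_list)
        {
                struct smc_buf_desc *buf_desc;

                /* 1. try to recycle an unused buffer of this size class */
                buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
                if (buf_desc) {
                        buf_desc->is_dma_need_sync = 0;
                } else {
                        /* 2. otherwise allocate a fresh one */
                        buf_desc = is_smcd ?
                                smcd_new_buf_create(lgr, is_rmb, bufsize) :
                                smcr_new_buf_create(lgr, is_rmb, bufsize);
                        if (IS_ERR(buf_desc))
                                return PTR_ERR(buf_desc);
                        buf_desc->used = 1;
                        list_add(&buf_desc->list, buf_list);
                }

                /* 3. SMC-R: make the buffer usable on every active link */
                if (!is_smcd && smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) {
                        smcr_buf_unuse(buf_desc, is_rmb, lgr);
                        return -ENOMEM;
                }

                /* 4. attach to the connection */
                if (is_rmb) {
                        conn->rmb_desc = buf_desc;
                        smc_rmb_wnd_update_limit(buf_desc->len);
                } else {
                        conn->sndbuf_desc = buf_desc;
                }
                return 0;
        }
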