Lines matching refs:info (all references to struct smbd_connection *info in the SMB Direct transport, smbdirect.c)

15 		struct smbd_connection *info);
17 struct smbd_connection *info);
19 struct smbd_connection *info,
21 static int allocate_receive_buffers(struct smbd_connection *info, int num_buf);
22 static void destroy_receive_buffers(struct smbd_connection *info);
25 struct smbd_connection *info, struct smbd_response *response);
27 struct smbd_connection *info,
30 struct smbd_connection *info);
33 struct smbd_connection *info,
36 static int smbd_post_send_empty(struct smbd_connection *info);
38 struct smbd_connection *info,
40 static int smbd_post_send_page(struct smbd_connection *info,
44 static void destroy_mr_list(struct smbd_connection *info);
45 static int allocate_mr_list(struct smbd_connection *info);
160 struct smbd_connection *info = in smbd_disconnect_rdma_work() local
163 if (info->transport_status == SMBD_CONNECTED) { in smbd_disconnect_rdma_work()
164 info->transport_status = SMBD_DISCONNECTING; in smbd_disconnect_rdma_work()
165 rdma_disconnect(info->id); in smbd_disconnect_rdma_work()
169 static void smbd_disconnect_rdma_connection(struct smbd_connection *info) in smbd_disconnect_rdma_connection() argument
171 queue_work(info->workqueue, &info->disconnect_work); in smbd_disconnect_rdma_connection()
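
The two fragments above (lines 160-171) show the deferred-disconnect pattern: completion and CM callbacks run in contexts that must not block, so the actual rdma_disconnect() is pushed onto the connection's workqueue. A minimal sketch of the pattern, assuming the usual container_of() recovery that the truncated line 160 hides:

static void smbd_disconnect_rdma_work(struct work_struct *work)
{
	struct smbd_connection *info =
		container_of(work, struct smbd_connection, disconnect_work);

	if (info->transport_status == SMBD_CONNECTED) {
		info->transport_status = SMBD_DISCONNECTING;
		rdma_disconnect(info->id);	/* starts CM teardown */
	}
}

/* safe to call from interrupt/softirq context; the work item does the rest */
static void smbd_disconnect_rdma_connection(struct smbd_connection *info)
{
	queue_work(info->workqueue, &info->disconnect_work);
}
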
178 struct smbd_connection *info = id->context; in smbd_conn_upcall() local
186 info->ri_rc = 0; in smbd_conn_upcall()
187 complete(&info->ri_done); in smbd_conn_upcall()
191 info->ri_rc = -EHOSTUNREACH; in smbd_conn_upcall()
192 complete(&info->ri_done); in smbd_conn_upcall()
196 info->ri_rc = -ENETUNREACH; in smbd_conn_upcall()
197 complete(&info->ri_done); in smbd_conn_upcall()
202 info->transport_status = SMBD_CONNECTED; in smbd_conn_upcall()
203 wake_up_interruptible(&info->conn_wait); in smbd_conn_upcall()
210 info->transport_status = SMBD_DISCONNECTED; in smbd_conn_upcall()
211 wake_up_interruptible(&info->conn_wait); in smbd_conn_upcall()
217 if (info->transport_status == SMBD_NEGOTIATE_FAILED) { in smbd_conn_upcall()
218 info->transport_status = SMBD_DISCONNECTED; in smbd_conn_upcall()
219 wake_up(&info->conn_wait); in smbd_conn_upcall()
223 info->transport_status = SMBD_DISCONNECTED; in smbd_conn_upcall()
224 wake_up_interruptible(&info->disconn_wait); in smbd_conn_upcall()
225 wake_up_interruptible(&info->wait_reassembly_queue); in smbd_conn_upcall()
226 wake_up_interruptible_all(&info->wait_send_queue); in smbd_conn_upcall()
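
Lines 178-226 are the rdma_cm event handler. The switch's case labels never reference info, so they do not appear in this listing; the labels below are filled in from the standard RDMA_CM_EVENT_* set and are therefore an assumption, while the case bodies follow the fragments:

static int smbd_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct smbd_connection *info = id->context;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		info->ri_rc = 0;			/* lines 186-187 */
		complete(&info->ri_done);
		break;

	case RDMA_CM_EVENT_ADDR_ERROR:
		info->ri_rc = -EHOSTUNREACH;		/* lines 191-192 */
		complete(&info->ri_done);
		break;

	case RDMA_CM_EVENT_ROUTE_ERROR:
		info->ri_rc = -ENETUNREACH;		/* lines 196-197 */
		complete(&info->ri_done);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		info->transport_status = SMBD_CONNECTED;	/* lines 202-203 */
		wake_up_interruptible(&info->conn_wait);
		break;

	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		info->transport_status = SMBD_DISCONNECTED;	/* lines 210-211 */
		wake_up_interruptible(&info->conn_wait);
		break;

	case RDMA_CM_EVENT_DISCONNECTED:
		/* a failed negotiation still has a waiter on conn_wait;
		 * a live connection wakes everything (lines 217-226) */
		if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
			info->transport_status = SMBD_DISCONNECTED;
			wake_up(&info->conn_wait);
			break;
		}
		info->transport_status = SMBD_DISCONNECTED;
		wake_up_interruptible(&info->disconn_wait);
		wake_up_interruptible(&info->wait_reassembly_queue);
		wake_up_interruptible_all(&info->wait_send_queue);
		break;

	default:
		break;
	}
	return 0;
}
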
240 struct smbd_connection *info = context; in smbd_qp_async_error_upcall() local
243 ib_event_msg(event->event), event->device->name, info); in smbd_qp_async_error_upcall()
248 smbd_disconnect_rdma_connection(info); in smbd_qp_async_error_upcall()
279 smbd_disconnect_rdma_connection(request->info); in send_done()
283 ib_dma_unmap_single(request->info->id->device, in send_done()
288 if (atomic_dec_and_test(&request->info->send_pending)) in send_done()
289 wake_up(&request->info->wait_send_pending); in send_done()
291 wake_up(&request->info->wait_post_send); in send_done()
293 mempool_free(request, request->info->request_mempool); in send_done()
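
Lines 279-293 are the send-completion handler: a bad work completion tears the connection down, and every completion unmaps the buffer, drops send_pending (waking smbd_destroy()'s drain wait when it reaches zero), nudges any sender throttled on wait_post_send, and recycles the request. A reduced sketch, assuming the usual ib_cqe recovery and the wc status check that the listing omits, and collapsing the per-SGE unmap loop to the first SGE:

static void send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smbd_request *request =
		container_of(wc->wr_cqe, struct smbd_request, cqe);

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND)
		smbd_disconnect_rdma_connection(request->info);

	ib_dma_unmap_single(request->info->id->device,
			    request->sge[0].addr, request->sge[0].length,
			    DMA_TO_DEVICE);

	if (atomic_dec_and_test(&request->info->send_pending))
		wake_up(&request->info->wait_send_pending);
	wake_up(&request->info->wait_post_send);

	mempool_free(request, request->info->request_mempool);
}
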
314 struct smbd_connection *info = response->info; in process_negotiation_response() local
328 info->protocol = le16_to_cpu(packet->negotiated_version); in process_negotiation_response()
334 info->receive_credit_target = le16_to_cpu(packet->credits_requested); in process_negotiation_response()
340 atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted)); in process_negotiation_response()
342 atomic_set(&info->receive_credits, 0); in process_negotiation_response()
344 if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) { in process_negotiation_response()
349 info->max_receive_size = le32_to_cpu(packet->preferred_send_size); in process_negotiation_response()
356 info->max_send_size = min_t(int, info->max_send_size, in process_negotiation_response()
365 info->max_fragmented_send_size = in process_negotiation_response()
367 info->rdma_readwrite_threshold = in process_negotiation_response()
368 rdma_readwrite_threshold > info->max_fragmented_send_size ? in process_negotiation_response()
369 info->max_fragmented_send_size : in process_negotiation_response()
373 info->max_readwrite_size = min_t(u32, in process_negotiation_response()
375 info->max_frmr_depth * PAGE_SIZE); in process_negotiation_response()
376 info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE; in process_negotiation_response()
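
Lines 314-376 digest the server's negotiate response: seed the credit counters, then clamp every size against the local limits so neither side can exceed what the other advertised. The derived rdma_readwrite_threshold decides when a payload switches from inline send/recv to RDMA read/write, and max_frmr_depth is trimmed to match the final max_readwrite_size. A condensed sketch of the clamping; the validation branches are elided, and the smbd_response_payload() accessor plus the packet field names not visible in this listing (max_receive_size, max_fragmented_size, max_readwrite_size) are assumptions from the negotiate response layout:

static bool process_negotiation_response(struct smbd_response *response,
					 int packet_length)
{
	struct smbd_connection *info = response->info;
	struct smbd_negotiate_resp *packet = smbd_response_payload(response);

	/* (version, credit and length validation elided) */
	info->protocol = le16_to_cpu(packet->negotiated_version);
	info->receive_credit_target = le16_to_cpu(packet->credits_requested);
	atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted));
	atomic_set(&info->receive_credits, 0);

	if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size)
		return false;	/* server would overrun our receive buffers */
	info->max_receive_size = le32_to_cpu(packet->preferred_send_size);

	info->max_send_size = min_t(int, info->max_send_size,
				    le32_to_cpu(packet->max_receive_size));

	info->max_fragmented_send_size =
		le32_to_cpu(packet->max_fragmented_size);
	info->rdma_readwrite_threshold =
		rdma_readwrite_threshold > info->max_fragmented_send_size ?
		info->max_fragmented_send_size : rdma_readwrite_threshold;

	info->max_readwrite_size = min_t(u32,
		le32_to_cpu(packet->max_readwrite_size),
		info->max_frmr_depth * PAGE_SIZE);
	info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE;

	return true;
}
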
387 struct smbd_connection *info = in smbd_post_send_credits() local
391 if (info->transport_status != SMBD_CONNECTED) { in smbd_post_send_credits()
392 wake_up(&info->wait_receive_queues); in smbd_post_send_credits()
396 if (info->receive_credit_target > in smbd_post_send_credits()
397 atomic_read(&info->receive_credits)) { in smbd_post_send_credits()
400 response = get_receive_buffer(info); in smbd_post_send_credits()
402 response = get_empty_queue_buffer(info); in smbd_post_send_credits()
414 rc = smbd_post_recv(info, response); in smbd_post_send_credits()
418 put_receive_buffer(info, response); in smbd_post_send_credits()
426 spin_lock(&info->lock_new_credits_offered); in smbd_post_send_credits()
427 info->new_credits_offered += ret; in smbd_post_send_credits()
428 spin_unlock(&info->lock_new_credits_offered); in smbd_post_send_credits()
431 info->send_immediate = true; in smbd_post_send_credits()
432 if (atomic_read(&info->receive_credits) < in smbd_post_send_credits()
433 info->receive_credit_target - 1) { in smbd_post_send_credits()
434 if (info->keep_alive_requested == KEEP_ALIVE_PENDING || in smbd_post_send_credits()
435 info->send_immediate) { in smbd_post_send_credits()
437 smbd_post_send_empty(info); in smbd_post_send_credits()
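
Lines 387-437 are the receive-credit replenish work item: while the peer's requested credit target exceeds our posted receives, recycle response buffers back onto the receive queue, then bank the newly offered credits under lock, and if the peer is close to stalling push them out immediately in an empty (credit-only) packet. A condensed sketch; the loop header and the NULL checks are not visible in the listing and are reconstructed:

static void smbd_post_send_credits(struct work_struct *work)
{
	struct smbd_connection *info = container_of(work,
			struct smbd_connection, post_send_credits_work);
	struct smbd_response *response;
	int ret = 0;

	if (info->transport_status != SMBD_CONNECTED) {
		wake_up(&info->wait_receive_queues);
		return;
	}

	while (info->receive_credit_target >
	       atomic_read(&info->receive_credits) + ret) {
		response = get_receive_buffer(info);
		if (!response)
			response = get_empty_queue_buffer(info);
		if (!response)
			break;			/* nothing left to post */
		if (smbd_post_recv(info, response)) {
			put_receive_buffer(info, response);
			break;
		}
		ret++;
	}

	spin_lock(&info->lock_new_credits_offered);
	info->new_credits_offered += ret;
	spin_unlock(&info->lock_new_credits_offered);

	/* send the new credits promptly if the peer is about to run dry */
	info->send_immediate = true;
	if (atomic_read(&info->receive_credits) <
	    info->receive_credit_target - 1) {
		if (info->keep_alive_requested == KEEP_ALIVE_PENDING ||
		    info->send_immediate)
			smbd_post_send_empty(info);
	}
}
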
448 struct smbd_connection *info = response->info; in recv_done() local
458 smbd_disconnect_rdma_connection(info); in recv_done()
472 info->full_packet_received = true; in recv_done()
473 info->negotiate_done = in recv_done()
475 complete(&info->negotiate_completion); in recv_done()
488 if (info->full_packet_received) in recv_done()
492 info->full_packet_received = false; in recv_done()
494 info->full_packet_received = true; in recv_done()
497 info, in recv_done()
501 put_empty_packet(info, response); in recv_done()
504 wake_up_interruptible(&info->wait_reassembly_queue); in recv_done()
506 atomic_dec(&info->receive_credits); in recv_done()
507 info->receive_credit_target = in recv_done()
511 &info->send_credits); in recv_done()
516 wake_up_interruptible(&info->wait_send_queue); in recv_done()
526 info->keep_alive_requested = KEEP_ALIVE_NONE; in recv_done()
529 info->keep_alive_requested = KEEP_ALIVE_PENDING; in recv_done()
540 put_receive_buffer(info, response); in recv_done()
544 struct smbd_connection *info, in smbd_create_id() argument
551 id = rdma_create_id(&init_net, smbd_conn_upcall, info, in smbd_create_id()
566 init_completion(&info->ri_done); in smbd_create_id()
567 info->ri_rc = -ETIMEDOUT; in smbd_create_id()
576 &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT)); in smbd_create_id()
582 rc = info->ri_rc; in smbd_create_id()
588 info->ri_rc = -ETIMEDOUT; in smbd_create_id()
595 &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT)); in smbd_create_id()
601 rc = info->ri_rc; in smbd_create_id()
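
Lines 544-601 create the rdma_cm_id and resolve address and route synchronously: both rdma_resolve_*() calls are asynchronous, so ri_rc is preset to -ETIMEDOUT, a completion is awaited with the same timeout, and smbd_conn_upcall() overwrites ri_rc with the real result before completing. A sketch of the full round-trip (logging elided; the real code also folds the port argument into dstaddr before resolving):

static struct rdma_cm_id *smbd_create_id(struct smbd_connection *info,
					 struct sockaddr *dstaddr, int port)
{
	struct rdma_cm_id *id;
	int rc;

	id = rdma_create_id(&init_net, smbd_conn_upcall, info,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return id;

	init_completion(&info->ri_done);
	info->ri_rc = -ETIMEDOUT;		/* the upcall overwrites this */
	rc = rdma_resolve_addr(id, NULL, dstaddr, RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	wait_for_completion_interruptible_timeout(&info->ri_done,
			msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
	rc = info->ri_rc;
	if (rc)
		goto out;

	info->ri_rc = -ETIMEDOUT;		/* same dance for the route */
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	wait_for_completion_interruptible_timeout(&info->ri_done,
			msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
	rc = info->ri_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}
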
629 struct smbd_connection *info, in smbd_ia_open() argument
634 info->id = smbd_create_id(info, dstaddr, port); in smbd_ia_open()
635 if (IS_ERR(info->id)) { in smbd_ia_open()
636 rc = PTR_ERR(info->id); in smbd_ia_open()
640 if (!frwr_is_supported(&info->id->device->attrs)) { in smbd_ia_open()
643 info->id->device->attrs.device_cap_flags, in smbd_ia_open()
644 info->id->device->attrs.max_fast_reg_page_list_len); in smbd_ia_open()
648 info->max_frmr_depth = min_t(int, in smbd_ia_open()
650 info->id->device->attrs.max_fast_reg_page_list_len); in smbd_ia_open()
651 info->mr_type = IB_MR_TYPE_MEM_REG; in smbd_ia_open()
652 if (info->id->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG) in smbd_ia_open()
653 info->mr_type = IB_MR_TYPE_SG_GAPS; in smbd_ia_open()
655 info->pd = ib_alloc_pd(info->id->device, 0); in smbd_ia_open()
656 if (IS_ERR(info->pd)) { in smbd_ia_open()
657 rc = PTR_ERR(info->pd); in smbd_ia_open()
665 rdma_destroy_id(info->id); in smbd_ia_open()
666 info->id = NULL; in smbd_ia_open()
678 static int smbd_post_send_negotiate_req(struct smbd_connection *info) in smbd_post_send_negotiate_req() argument
685 request = mempool_alloc(info->request_mempool, GFP_KERNEL); in smbd_post_send_negotiate_req()
689 request->info = info; in smbd_post_send_negotiate_req()
695 packet->credits_requested = cpu_to_le16(info->send_credit_target); in smbd_post_send_negotiate_req()
696 packet->preferred_send_size = cpu_to_le32(info->max_send_size); in smbd_post_send_negotiate_req()
697 packet->max_receive_size = cpu_to_le32(info->max_receive_size); in smbd_post_send_negotiate_req()
699 cpu_to_le32(info->max_fragmented_recv_size); in smbd_post_send_negotiate_req()
703 info->id->device, (void *)packet, in smbd_post_send_negotiate_req()
705 if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) { in smbd_post_send_negotiate_req()
711 request->sge[0].lkey = info->pd->local_dma_lkey; in smbd_post_send_negotiate_req()
714 info->id->device, request->sge[0].addr, in smbd_post_send_negotiate_req()
730 atomic_inc(&info->send_pending); in smbd_post_send_negotiate_req()
731 rc = ib_post_send(info->id->qp, &send_wr, NULL); in smbd_post_send_negotiate_req()
737 atomic_dec(&info->send_pending); in smbd_post_send_negotiate_req()
738 ib_dma_unmap_single(info->id->device, request->sge[0].addr, in smbd_post_send_negotiate_req()
741 smbd_disconnect_rdma_connection(info); in smbd_post_send_negotiate_req()
744 mempool_free(request, info->request_mempool); in smbd_post_send_negotiate_req()
756 static int manage_credits_prior_sending(struct smbd_connection *info) in manage_credits_prior_sending() argument
760 spin_lock(&info->lock_new_credits_offered); in manage_credits_prior_sending()
761 new_credits = info->new_credits_offered; in manage_credits_prior_sending()
762 info->new_credits_offered = 0; in manage_credits_prior_sending()
763 spin_unlock(&info->lock_new_credits_offered); in manage_credits_prior_sending()
777 static int manage_keep_alive_before_sending(struct smbd_connection *info) in manage_keep_alive_before_sending() argument
779 if (info->keep_alive_requested == KEEP_ALIVE_PENDING) { in manage_keep_alive_before_sending()
780 info->keep_alive_requested = KEEP_ALIVE_SENT; in manage_keep_alive_before_sending()
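
Lines 756-780 are two small pre-send hooks. The first atomically claims whatever credits the replenish work has banked; the second turns a pending keep-alive into KEEP_ALIVE_SENT so the outgoing packet can request a response. Reconstructed in full (return types inferred from the call sites at lines 884 and 891):

/* claim everything new_credits_offered has accumulated, under the lock */
static int manage_credits_prior_sending(struct smbd_connection *info)
{
	int new_credits;

	spin_lock(&info->lock_new_credits_offered);
	new_credits = info->new_credits_offered;
	info->new_credits_offered = 0;
	spin_unlock(&info->lock_new_credits_offered);

	return new_credits;
}

/* nonzero means this packet should ask the peer for a response */
static int manage_keep_alive_before_sending(struct smbd_connection *info)
{
	if (info->keep_alive_requested == KEEP_ALIVE_PENDING) {
		info->keep_alive_requested = KEEP_ALIVE_SENT;
		return 1;
	}
	return 0;
}
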
787 static int smbd_post_send(struct smbd_connection *info, in smbd_post_send() argument
798 info->id->device, in smbd_post_send()
813 rc = ib_post_send(info->id->qp, &send_wr, NULL); in smbd_post_send()
816 smbd_disconnect_rdma_connection(info); in smbd_post_send()
820 mod_delayed_work(info->workqueue, &info->idle_timer_work, in smbd_post_send()
821 info->keep_alive_interval*HZ); in smbd_post_send()
826 static int smbd_post_send_sgl(struct smbd_connection *info, in smbd_post_send_sgl() argument
839 rc = wait_event_interruptible(info->wait_send_queue, in smbd_post_send_sgl()
840 atomic_read(&info->send_credits) > 0 || in smbd_post_send_sgl()
841 info->transport_status != SMBD_CONNECTED); in smbd_post_send_sgl()
845 if (info->transport_status != SMBD_CONNECTED) { in smbd_post_send_sgl()
850 if (unlikely(atomic_dec_return(&info->send_credits) < 0)) { in smbd_post_send_sgl()
851 atomic_inc(&info->send_credits); in smbd_post_send_sgl()
856 wait_event(info->wait_post_send, in smbd_post_send_sgl()
857 atomic_read(&info->send_pending) < info->send_credit_target || in smbd_post_send_sgl()
858 info->transport_status != SMBD_CONNECTED); in smbd_post_send_sgl()
860 if (info->transport_status != SMBD_CONNECTED) { in smbd_post_send_sgl()
866 if (unlikely(atomic_inc_return(&info->send_pending) > in smbd_post_send_sgl()
867 info->send_credit_target)) { in smbd_post_send_sgl()
868 atomic_dec(&info->send_pending); in smbd_post_send_sgl()
872 request = mempool_alloc(info->request_mempool, GFP_KERNEL); in smbd_post_send_sgl()
878 request->info = info; in smbd_post_send_sgl()
882 packet->credits_requested = cpu_to_le16(info->send_credit_target); in smbd_post_send_sgl()
884 new_credits = manage_credits_prior_sending(info); in smbd_post_send_sgl()
885 atomic_add(new_credits, &info->receive_credits); in smbd_post_send_sgl()
888 info->send_immediate = false; in smbd_post_send_sgl()
891 if (manage_keep_alive_before_sending(info)) in smbd_post_send_sgl()
917 request->sge[0].addr = ib_dma_map_single(info->id->device, in smbd_post_send_sgl()
921 if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) { in smbd_post_send_sgl()
928 request->sge[0].lkey = info->pd->local_dma_lkey; in smbd_post_send_sgl()
934 ib_dma_map_page(info->id->device, sg_page(sg), in smbd_post_send_sgl()
937 info->id->device, request->sge[i+1].addr)) { in smbd_post_send_sgl()
943 request->sge[i+1].lkey = info->pd->local_dma_lkey; in smbd_post_send_sgl()
947 rc = smbd_post_send(info, request); in smbd_post_send_sgl()
954 ib_dma_unmap_single(info->id->device, in smbd_post_send_sgl()
958 mempool_free(request, info->request_mempool); in smbd_post_send_sgl()
961 spin_lock(&info->lock_new_credits_offered); in smbd_post_send_sgl()
962 info->new_credits_offered += new_credits; in smbd_post_send_sgl()
963 spin_unlock(&info->lock_new_credits_offered); in smbd_post_send_sgl()
964 atomic_sub(new_credits, &info->receive_credits); in smbd_post_send_sgl()
967 if (atomic_dec_and_test(&info->send_pending)) in smbd_post_send_sgl()
968 wake_up(&info->wait_send_pending); in smbd_post_send_sgl()
972 atomic_inc(&info->send_credits); in smbd_post_send_sgl()
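
Lines 826-972 are the core send path, and the ordering of its gates matters: first take a send credit (waiting until the peer grants one), then bound send_pending by send_credit_target so completions cannot be overrun, and only then allocate, build, and map the request; each failure path unwinds those steps in reverse (lines 954-972). The accounting skeleton, extracted here into a hypothetical helper for illustration (smbd_post_send_sgl_accounting() is not a function in the file):

static int smbd_post_send_sgl_accounting(struct smbd_connection *info)
{
	int rc;

wait_credit:
	/* gate 1: consume a send credit, or sleep until one is granted */
	rc = wait_event_interruptible(info->wait_send_queue,
			atomic_read(&info->send_credits) > 0 ||
			info->transport_status != SMBD_CONNECTED);
	if (rc)
		return rc;
	if (info->transport_status != SMBD_CONNECTED)
		return -EAGAIN;
	if (unlikely(atomic_dec_return(&info->send_credits) < 0)) {
		atomic_inc(&info->send_credits);	/* lost the race */
		goto wait_credit;
	}

wait_send_queue:
	/* gate 2: cap in-flight sends at send_credit_target */
	wait_event(info->wait_post_send,
		   atomic_read(&info->send_pending) < info->send_credit_target ||
		   info->transport_status != SMBD_CONNECTED);
	if (info->transport_status != SMBD_CONNECTED) {
		atomic_inc(&info->send_credits);
		return -EAGAIN;
	}
	if (unlikely(atomic_inc_return(&info->send_pending) >
		     info->send_credit_target)) {
		atomic_dec(&info->send_pending);
		goto wait_send_queue;
	}

	return 0;	/* caller may now allocate, map and post the request */
}
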
985 static int smbd_post_send_page(struct smbd_connection *info, struct page *page, in smbd_post_send_page() argument
993 return smbd_post_send_sgl(info, &sgl, size, remaining_data_length); in smbd_post_send_page()
1001 static int smbd_post_send_empty(struct smbd_connection *info) in smbd_post_send_empty() argument
1003 info->count_send_empty++; in smbd_post_send_empty()
1004 return smbd_post_send_sgl(info, NULL, 0, 0); in smbd_post_send_empty()
1015 struct smbd_connection *info, struct kvec *iov, int n_vec, in smbd_post_send_data() argument
1033 return smbd_post_send_sgl(info, sgl, data_length, remaining_data_length); in smbd_post_send_data()
1042 struct smbd_connection *info, struct smbd_response *response) in smbd_post_recv() argument
1048 info->id->device, response->packet, in smbd_post_recv()
1049 info->max_receive_size, DMA_FROM_DEVICE); in smbd_post_recv()
1050 if (ib_dma_mapping_error(info->id->device, response->sge.addr)) in smbd_post_recv()
1053 response->sge.length = info->max_receive_size; in smbd_post_recv()
1054 response->sge.lkey = info->pd->local_dma_lkey; in smbd_post_recv()
1063 rc = ib_post_recv(info->id->qp, &recv_wr, NULL); in smbd_post_recv()
1065 ib_dma_unmap_single(info->id->device, response->sge.addr, in smbd_post_recv()
1067 smbd_disconnect_rdma_connection(info); in smbd_post_recv()
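
Lines 1042-1067 post a single receive buffer: map the packet for DMA, describe it with one SGE, and hand it to the QP. A posting failure both unmaps and escalates to a disconnect, since a receive the credit accounting already counted must not silently vanish. Reconstructed (the recv_wr setup lines carry no info reference, so they are filled in around the SGE fields shown):

static int smbd_post_recv(struct smbd_connection *info,
			  struct smbd_response *response)
{
	struct ib_recv_wr recv_wr;
	int rc;

	response->sge.addr = ib_dma_map_single(info->id->device,
					       response->packet,
					       info->max_receive_size,
					       DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(info->id->device, response->sge.addr))
		return -ENOMEM;

	response->sge.length = info->max_receive_size;
	response->sge.lkey = info->pd->local_dma_lkey;

	response->cqe.done = recv_done;
	recv_wr.wr_cqe = &response->cqe;
	recv_wr.next = NULL;
	recv_wr.sg_list = &response->sge;
	recv_wr.num_sge = 1;

	rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
	if (rc) {
		ib_dma_unmap_single(info->id->device, response->sge.addr,
				    response->sge.length, DMA_FROM_DEVICE);
		smbd_disconnect_rdma_connection(info);
	}
	return rc;
}
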
1075 static int smbd_negotiate(struct smbd_connection *info) in smbd_negotiate() argument
1078 struct smbd_response *response = get_receive_buffer(info); in smbd_negotiate()
1081 rc = smbd_post_recv(info, response); in smbd_negotiate()
1088 init_completion(&info->negotiate_completion); in smbd_negotiate()
1089 info->negotiate_done = false; in smbd_negotiate()
1090 rc = smbd_post_send_negotiate_req(info); in smbd_negotiate()
1095 &info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ); in smbd_negotiate()
1098 if (info->negotiate_done) in smbd_negotiate()
1112 struct smbd_connection *info, struct smbd_response *response) in put_empty_packet() argument
1114 spin_lock(&info->empty_packet_queue_lock); in put_empty_packet()
1115 list_add_tail(&response->list, &info->empty_packet_queue); in put_empty_packet()
1116 info->count_empty_packet_queue++; in put_empty_packet()
1117 spin_unlock(&info->empty_packet_queue_lock); in put_empty_packet()
1119 queue_work(info->workqueue, &info->post_send_credits_work); in put_empty_packet()
1133 struct smbd_connection *info, in enqueue_reassembly() argument
1137 spin_lock(&info->reassembly_queue_lock); in enqueue_reassembly()
1138 list_add_tail(&response->list, &info->reassembly_queue); in enqueue_reassembly()
1139 info->reassembly_queue_length++; in enqueue_reassembly()
1147 info->reassembly_data_length += data_length; in enqueue_reassembly()
1148 spin_unlock(&info->reassembly_queue_lock); in enqueue_reassembly()
1149 info->count_reassembly_queue++; in enqueue_reassembly()
1150 info->count_enqueue_reassembly_queue++; in enqueue_reassembly()
1158 static struct smbd_response *_get_first_reassembly(struct smbd_connection *info) in _get_first_reassembly() argument
1162 if (!list_empty(&info->reassembly_queue)) { in _get_first_reassembly()
1164 &info->reassembly_queue, in _get_first_reassembly()
1171 struct smbd_connection *info) in get_empty_queue_buffer() argument
1176 spin_lock_irqsave(&info->empty_packet_queue_lock, flags); in get_empty_queue_buffer()
1177 if (!list_empty(&info->empty_packet_queue)) { in get_empty_queue_buffer()
1179 &info->empty_packet_queue, in get_empty_queue_buffer()
1182 info->count_empty_packet_queue--; in get_empty_queue_buffer()
1184 spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags); in get_empty_queue_buffer()
1195 static struct smbd_response *get_receive_buffer(struct smbd_connection *info) in get_receive_buffer() argument
1200 spin_lock_irqsave(&info->receive_queue_lock, flags); in get_receive_buffer()
1201 if (!list_empty(&info->receive_queue)) { in get_receive_buffer()
1203 &info->receive_queue, in get_receive_buffer()
1206 info->count_receive_queue--; in get_receive_buffer()
1207 info->count_get_receive_buffer++; in get_receive_buffer()
1209 spin_unlock_irqrestore(&info->receive_queue_lock, flags); in get_receive_buffer()
1221 struct smbd_connection *info, struct smbd_response *response) in put_receive_buffer() argument
1225 ib_dma_unmap_single(info->id->device, response->sge.addr, in put_receive_buffer()
1228 spin_lock_irqsave(&info->receive_queue_lock, flags); in put_receive_buffer()
1229 list_add_tail(&response->list, &info->receive_queue); in put_receive_buffer()
1230 info->count_receive_queue++; in put_receive_buffer()
1231 info->count_put_receive_buffer++; in put_receive_buffer()
1232 spin_unlock_irqrestore(&info->receive_queue_lock, flags); in put_receive_buffer()
1234 queue_work(info->workqueue, &info->post_send_credits_work); in put_receive_buffer()
1238 static int allocate_receive_buffers(struct smbd_connection *info, int num_buf) in allocate_receive_buffers() argument
1243 INIT_LIST_HEAD(&info->reassembly_queue); in allocate_receive_buffers()
1244 spin_lock_init(&info->reassembly_queue_lock); in allocate_receive_buffers()
1245 info->reassembly_data_length = 0; in allocate_receive_buffers()
1246 info->reassembly_queue_length = 0; in allocate_receive_buffers()
1248 INIT_LIST_HEAD(&info->receive_queue); in allocate_receive_buffers()
1249 spin_lock_init(&info->receive_queue_lock); in allocate_receive_buffers()
1250 info->count_receive_queue = 0; in allocate_receive_buffers()
1252 INIT_LIST_HEAD(&info->empty_packet_queue); in allocate_receive_buffers()
1253 spin_lock_init(&info->empty_packet_queue_lock); in allocate_receive_buffers()
1254 info->count_empty_packet_queue = 0; in allocate_receive_buffers()
1256 init_waitqueue_head(&info->wait_receive_queues); in allocate_receive_buffers()
1259 response = mempool_alloc(info->response_mempool, GFP_KERNEL); in allocate_receive_buffers()
1263 response->info = info; in allocate_receive_buffers()
1264 list_add_tail(&response->list, &info->receive_queue); in allocate_receive_buffers()
1265 info->count_receive_queue++; in allocate_receive_buffers()
1271 while (!list_empty(&info->receive_queue)) { in allocate_receive_buffers()
1273 &info->receive_queue, in allocate_receive_buffers()
1276 info->count_receive_queue--; in allocate_receive_buffers()
1278 mempool_free(response, info->response_mempool); in allocate_receive_buffers()
1283 static void destroy_receive_buffers(struct smbd_connection *info) in destroy_receive_buffers() argument
1287 while ((response = get_receive_buffer(info))) in destroy_receive_buffers()
1288 mempool_free(response, info->response_mempool); in destroy_receive_buffers()
1290 while ((response = get_empty_queue_buffer(info))) in destroy_receive_buffers()
1291 mempool_free(response, info->response_mempool); in destroy_receive_buffers()
1297 struct smbd_connection *info = container_of( in idle_connection_timer() local
1301 if (info->keep_alive_requested != KEEP_ALIVE_NONE) { in idle_connection_timer()
1304 info->keep_alive_requested); in idle_connection_timer()
1305 smbd_disconnect_rdma_connection(info); in idle_connection_timer()
1310 smbd_post_send_empty(info); in idle_connection_timer()
1313 queue_delayed_work(info->workqueue, &info->idle_timer_work, in idle_connection_timer()
1314 info->keep_alive_interval*HZ); in idle_connection_timer()
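
Lines 1297-1314 are the keep-alive machinery: the delayed work fires every keep_alive_interval seconds; if the previous keep-alive exchange is still outstanding the link is declared dead, otherwise an empty packet goes out and the timer re-arms. (mod_delayed_work() at lines 820-821 pushes the deadline back on every normal send, so the probe fires only on a genuinely idle connection.) Reconstructed, minus the log lines:

static void idle_connection_timer(struct work_struct *work)
{
	struct smbd_connection *info = container_of(work,
			struct smbd_connection, idle_timer_work.work);

	if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
		/* the last exchange never completed: give up on the link */
		smbd_disconnect_rdma_connection(info);
		return;
	}

	smbd_post_send_empty(info);

	/* re-arm for the next idle interval */
	queue_delayed_work(info->workqueue, &info->idle_timer_work,
			   info->keep_alive_interval*HZ);
}
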
1324 struct smbd_connection *info = server->smbd_conn; in smbd_destroy() local
1328 if (!info) { in smbd_destroy()
1334 if (info->transport_status != SMBD_DISCONNECTED) { in smbd_destroy()
1338 info->disconn_wait, in smbd_destroy()
1339 info->transport_status == SMBD_DISCONNECTED); in smbd_destroy()
1343 ib_drain_qp(info->id->qp); in smbd_destroy()
1344 rdma_destroy_qp(info->id); in smbd_destroy()
1347 cancel_delayed_work_sync(&info->idle_timer_work); in smbd_destroy()
1350 wait_event(info->wait_send_pending, in smbd_destroy()
1351 atomic_read(&info->send_pending) == 0); in smbd_destroy()
1356 spin_lock_irqsave(&info->reassembly_queue_lock, flags); in smbd_destroy()
1357 response = _get_first_reassembly(info); in smbd_destroy()
1361 &info->reassembly_queue_lock, flags); in smbd_destroy()
1362 put_receive_buffer(info, response); in smbd_destroy()
1365 &info->reassembly_queue_lock, flags); in smbd_destroy()
1367 info->reassembly_data_length = 0; in smbd_destroy()
1370 wait_event(info->wait_receive_queues, in smbd_destroy()
1371 info->count_receive_queue + info->count_empty_packet_queue in smbd_destroy()
1372 == info->receive_credit_max); in smbd_destroy()
1373 destroy_receive_buffers(info); in smbd_destroy()
1383 wake_up_interruptible_all(&info->wait_mr); in smbd_destroy()
1384 while (atomic_read(&info->mr_used_count)) { in smbd_destroy()
1389 destroy_mr_list(info); in smbd_destroy()
1391 ib_free_cq(info->send_cq); in smbd_destroy()
1392 ib_free_cq(info->recv_cq); in smbd_destroy()
1393 ib_dealloc_pd(info->pd); in smbd_destroy()
1394 rdma_destroy_id(info->id); in smbd_destroy()
1397 mempool_destroy(info->request_mempool); in smbd_destroy()
1398 kmem_cache_destroy(info->request_cache); in smbd_destroy()
1400 mempool_destroy(info->response_mempool); in smbd_destroy()
1401 kmem_cache_destroy(info->response_cache); in smbd_destroy()
1403 info->transport_status = SMBD_DESTROYED; in smbd_destroy()
1405 destroy_workqueue(info->workqueue); in smbd_destroy()
1407 kfree(info); in smbd_destroy()
1444 static void destroy_caches_and_workqueue(struct smbd_connection *info) in destroy_caches_and_workqueue() argument
1446 destroy_receive_buffers(info); in destroy_caches_and_workqueue()
1447 destroy_workqueue(info->workqueue); in destroy_caches_and_workqueue()
1448 mempool_destroy(info->response_mempool); in destroy_caches_and_workqueue()
1449 kmem_cache_destroy(info->response_cache); in destroy_caches_and_workqueue()
1450 mempool_destroy(info->request_mempool); in destroy_caches_and_workqueue()
1451 kmem_cache_destroy(info->request_cache); in destroy_caches_and_workqueue()
1455 static int allocate_caches_and_workqueue(struct smbd_connection *info) in allocate_caches_and_workqueue() argument
1460 scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info); in allocate_caches_and_workqueue()
1461 info->request_cache = in allocate_caches_and_workqueue()
1467 if (!info->request_cache) in allocate_caches_and_workqueue()
1470 info->request_mempool = in allocate_caches_and_workqueue()
1471 mempool_create(info->send_credit_target, mempool_alloc_slab, in allocate_caches_and_workqueue()
1472 mempool_free_slab, info->request_cache); in allocate_caches_and_workqueue()
1473 if (!info->request_mempool) in allocate_caches_and_workqueue()
1476 scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info); in allocate_caches_and_workqueue()
1477 info->response_cache = in allocate_caches_and_workqueue()
1481 info->max_receive_size, in allocate_caches_and_workqueue()
1483 if (!info->response_cache) in allocate_caches_and_workqueue()
1486 info->response_mempool = in allocate_caches_and_workqueue()
1487 mempool_create(info->receive_credit_max, mempool_alloc_slab, in allocate_caches_and_workqueue()
1488 mempool_free_slab, info->response_cache); in allocate_caches_and_workqueue()
1489 if (!info->response_mempool) in allocate_caches_and_workqueue()
1492 scnprintf(name, MAX_NAME_LEN, "smbd_%p", info); in allocate_caches_and_workqueue()
1493 info->workqueue = create_workqueue(name); in allocate_caches_and_workqueue()
1494 if (!info->workqueue) in allocate_caches_and_workqueue()
1497 rc = allocate_receive_buffers(info, info->receive_credit_max); in allocate_caches_and_workqueue()
1506 destroy_workqueue(info->workqueue); in allocate_caches_and_workqueue()
1508 mempool_destroy(info->response_mempool); in allocate_caches_and_workqueue()
1510 kmem_cache_destroy(info->response_cache); in allocate_caches_and_workqueue()
1512 mempool_destroy(info->request_mempool); in allocate_caches_and_workqueue()
1514 kmem_cache_destroy(info->request_cache); in allocate_caches_and_workqueue()
1523 struct smbd_connection *info; in _smbd_get_connection() local
1530 info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL); in _smbd_get_connection()
1531 if (!info) in _smbd_get_connection()
1534 info->transport_status = SMBD_CONNECTING; in _smbd_get_connection()
1535 rc = smbd_ia_open(info, dstaddr, port); in _smbd_get_connection()
1541 if (smbd_send_credit_target > info->id->device->attrs.max_cqe || in _smbd_get_connection()
1542 smbd_send_credit_target > info->id->device->attrs.max_qp_wr) { in _smbd_get_connection()
1545 info->id->device->attrs.max_cqe, in _smbd_get_connection()
1546 info->id->device->attrs.max_qp_wr); in _smbd_get_connection()
1550 if (smbd_receive_credit_max > info->id->device->attrs.max_cqe || in _smbd_get_connection()
1551 smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) { in _smbd_get_connection()
1554 info->id->device->attrs.max_cqe, in _smbd_get_connection()
1555 info->id->device->attrs.max_qp_wr); in _smbd_get_connection()
1559 info->receive_credit_max = smbd_receive_credit_max; in _smbd_get_connection()
1560 info->send_credit_target = smbd_send_credit_target; in _smbd_get_connection()
1561 info->max_send_size = smbd_max_send_size; in _smbd_get_connection()
1562 info->max_fragmented_recv_size = smbd_max_fragmented_recv_size; in _smbd_get_connection()
1563 info->max_receive_size = smbd_max_receive_size; in _smbd_get_connection()
1564 info->keep_alive_interval = smbd_keep_alive_interval; in _smbd_get_connection()
1566 if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE || in _smbd_get_connection()
1567 info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) { in _smbd_get_connection()
1571 info->id->device->name, in _smbd_get_connection()
1572 info->id->device->attrs.max_send_sge, in _smbd_get_connection()
1573 info->id->device->attrs.max_recv_sge); in _smbd_get_connection()
1577 info->send_cq = NULL; in _smbd_get_connection()
1578 info->recv_cq = NULL; in _smbd_get_connection()
1579 info->send_cq = in _smbd_get_connection()
1580 ib_alloc_cq_any(info->id->device, info, in _smbd_get_connection()
1581 info->send_credit_target, IB_POLL_SOFTIRQ); in _smbd_get_connection()
1582 if (IS_ERR(info->send_cq)) { in _smbd_get_connection()
1583 info->send_cq = NULL; in _smbd_get_connection()
1587 info->recv_cq = in _smbd_get_connection()
1588 ib_alloc_cq_any(info->id->device, info, in _smbd_get_connection()
1589 info->receive_credit_max, IB_POLL_SOFTIRQ); in _smbd_get_connection()
1590 if (IS_ERR(info->recv_cq)) { in _smbd_get_connection()
1591 info->recv_cq = NULL; in _smbd_get_connection()
1597 qp_attr.qp_context = info; in _smbd_get_connection()
1598 qp_attr.cap.max_send_wr = info->send_credit_target; in _smbd_get_connection()
1599 qp_attr.cap.max_recv_wr = info->receive_credit_max; in _smbd_get_connection()
1605 qp_attr.send_cq = info->send_cq; in _smbd_get_connection()
1606 qp_attr.recv_cq = info->recv_cq; in _smbd_get_connection()
1609 rc = rdma_create_qp(info->id, info->pd, &qp_attr); in _smbd_get_connection()
1619 info->id->device->attrs.max_qp_rd_atom in _smbd_get_connection()
1621 info->id->device->attrs.max_qp_rd_atom : in _smbd_get_connection()
1623 info->responder_resources = conn_param.responder_resources; in _smbd_get_connection()
1625 info->responder_resources); in _smbd_get_connection()
1628 info->id->device->ops.get_port_immutable( in _smbd_get_connection()
1629 info->id->device, info->id->port_num, &port_immutable); in _smbd_get_connection()
1631 ird_ord_hdr[0] = info->responder_resources; in _smbd_get_connection()
1647 init_waitqueue_head(&info->conn_wait); in _smbd_get_connection()
1648 init_waitqueue_head(&info->disconn_wait); in _smbd_get_connection()
1649 init_waitqueue_head(&info->wait_reassembly_queue); in _smbd_get_connection()
1650 rc = rdma_connect(info->id, &conn_param); in _smbd_get_connection()
1657 info->conn_wait, info->transport_status != SMBD_CONNECTING); in _smbd_get_connection()
1659 if (info->transport_status != SMBD_CONNECTED) { in _smbd_get_connection()
1666 rc = allocate_caches_and_workqueue(info); in _smbd_get_connection()
1672 init_waitqueue_head(&info->wait_send_queue); in _smbd_get_connection()
1673 INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer); in _smbd_get_connection()
1674 queue_delayed_work(info->workqueue, &info->idle_timer_work, in _smbd_get_connection()
1675 info->keep_alive_interval*HZ); in _smbd_get_connection()
1677 init_waitqueue_head(&info->wait_send_pending); in _smbd_get_connection()
1678 atomic_set(&info->send_pending, 0); in _smbd_get_connection()
1680 init_waitqueue_head(&info->wait_post_send); in _smbd_get_connection()
1682 INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work); in _smbd_get_connection()
1683 INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits); in _smbd_get_connection()
1684 info->new_credits_offered = 0; in _smbd_get_connection()
1685 spin_lock_init(&info->lock_new_credits_offered); in _smbd_get_connection()
1687 rc = smbd_negotiate(info); in _smbd_get_connection()
1693 rc = allocate_mr_list(info); in _smbd_get_connection()
1699 return info; in _smbd_get_connection()
1707 cancel_delayed_work_sync(&info->idle_timer_work); in _smbd_get_connection()
1708 destroy_caches_and_workqueue(info); in _smbd_get_connection()
1709 info->transport_status = SMBD_NEGOTIATE_FAILED; in _smbd_get_connection()
1710 init_waitqueue_head(&info->conn_wait); in _smbd_get_connection()
1711 rdma_disconnect(info->id); in _smbd_get_connection()
1712 wait_event(info->conn_wait, in _smbd_get_connection()
1713 info->transport_status == SMBD_DISCONNECTED); in _smbd_get_connection()
1717 rdma_destroy_qp(info->id); in _smbd_get_connection()
1721 if (info->send_cq) in _smbd_get_connection()
1722 ib_free_cq(info->send_cq); in _smbd_get_connection()
1723 if (info->recv_cq) in _smbd_get_connection()
1724 ib_free_cq(info->recv_cq); in _smbd_get_connection()
1727 ib_dealloc_pd(info->pd); in _smbd_get_connection()
1728 rdma_destroy_id(info->id); in _smbd_get_connection()
1731 kfree(info); in _smbd_get_connection()
1765 static int smbd_recv_buf(struct smbd_connection *info, char *buf, in smbd_recv_buf() argument
1781 info->reassembly_data_length); in smbd_recv_buf()
1782 if (info->reassembly_data_length >= size) { in smbd_recv_buf()
1794 queue_length = info->reassembly_queue_length; in smbd_recv_buf()
1797 offset = info->first_entry_offset; in smbd_recv_buf()
1799 response = _get_first_reassembly(info); in smbd_recv_buf()
1843 &info->reassembly_queue_lock); in smbd_recv_buf()
1846 &info->reassembly_queue_lock); in smbd_recv_buf()
1849 info->count_reassembly_queue--; in smbd_recv_buf()
1850 info->count_dequeue_reassembly_queue++; in smbd_recv_buf()
1851 put_receive_buffer(info, response); in smbd_recv_buf()
1865 spin_lock_irq(&info->reassembly_queue_lock); in smbd_recv_buf()
1866 info->reassembly_data_length -= data_read; in smbd_recv_buf()
1867 info->reassembly_queue_length -= queue_removed; in smbd_recv_buf()
1868 spin_unlock_irq(&info->reassembly_queue_lock); in smbd_recv_buf()
1870 info->first_entry_offset = offset; in smbd_recv_buf()
1872 data_read, info->reassembly_data_length, in smbd_recv_buf()
1873 info->first_entry_offset); in smbd_recv_buf()
1880 info->wait_reassembly_queue, in smbd_recv_buf()
1881 info->reassembly_data_length >= size || in smbd_recv_buf()
1882 info->transport_status != SMBD_CONNECTED); in smbd_recv_buf()
1887 if (info->transport_status != SMBD_CONNECTED) { in smbd_recv_buf()
1901 static int smbd_recv_page(struct smbd_connection *info, in smbd_recv_page() argument
1911 info->wait_reassembly_queue, in smbd_recv_page()
1912 info->reassembly_data_length >= to_read || in smbd_recv_page()
1913 info->transport_status != SMBD_CONNECTED); in smbd_recv_page()
1924 ret = smbd_recv_buf(info, to_address, to_read); in smbd_recv_page()
1935 int smbd_recv(struct smbd_connection *info, struct msghdr *msg) in smbd_recv() argument
1954 rc = smbd_recv_buf(info, buf, to_read); in smbd_recv()
1961 rc = smbd_recv_page(info, page, page_offset, to_read); in smbd_recv()
1987 struct smbd_connection *info = server->smbd_conn; in smbd_send() local
1995 info->max_send_size - sizeof(struct smbd_data_transfer); in smbd_send()
2001 if (info->transport_status != SMBD_CONNECTED) in smbd_send()
2013 if (unlikely(remaining_data_length > info->max_fragmented_send_size)) { in smbd_send()
2016 remaining_data_length, info->max_fragmented_send_size); in smbd_send()
2076 rc = smbd_post_send_data(info, vecs, j, remaining_data_length); in smbd_send()
2094 info, rqst->rq_pages[i], in smbd_send()
2111 wait_event(info->wait_send_pending, in smbd_send()
2112 atomic_read(&info->send_pending) == 0); in smbd_send()
2141 struct smbd_connection *info = in smbd_mr_recovery_work() local
2146 list_for_each_entry(smbdirect_mr, &info->mr_list, list) { in smbd_mr_recovery_work()
2155 smbd_disconnect_rdma_connection(info); in smbd_mr_recovery_work()
2160 info->pd, info->mr_type, in smbd_mr_recovery_work()
2161 info->max_frmr_depth); in smbd_mr_recovery_work()
2164 info->mr_type, in smbd_mr_recovery_work()
2165 info->max_frmr_depth); in smbd_mr_recovery_work()
2166 smbd_disconnect_rdma_connection(info); in smbd_mr_recovery_work()
2182 if (atomic_inc_return(&info->mr_ready_count) == 1) in smbd_mr_recovery_work()
2183 wake_up_interruptible(&info->wait_mr); in smbd_mr_recovery_work()
2187 static void destroy_mr_list(struct smbd_connection *info) in destroy_mr_list() argument
2191 cancel_work_sync(&info->mr_recovery_work); in destroy_mr_list()
2192 list_for_each_entry_safe(mr, tmp, &info->mr_list, list) { in destroy_mr_list()
2194 ib_dma_unmap_sg(info->id->device, mr->sgl, in destroy_mr_list()
2209 static int allocate_mr_list(struct smbd_connection *info) in allocate_mr_list() argument
2214 INIT_LIST_HEAD(&info->mr_list); in allocate_mr_list()
2215 init_waitqueue_head(&info->wait_mr); in allocate_mr_list()
2216 spin_lock_init(&info->mr_list_lock); in allocate_mr_list()
2217 atomic_set(&info->mr_ready_count, 0); in allocate_mr_list()
2218 atomic_set(&info->mr_used_count, 0); in allocate_mr_list()
2219 init_waitqueue_head(&info->wait_for_mr_cleanup); in allocate_mr_list()
2221 for (i = 0; i < info->responder_resources * 2; i++) { in allocate_mr_list()
2225 smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type, in allocate_mr_list()
2226 info->max_frmr_depth); in allocate_mr_list()
2229 info->mr_type, info->max_frmr_depth); in allocate_mr_list()
2233 info->max_frmr_depth, in allocate_mr_list()
2242 smbdirect_mr->conn = info; in allocate_mr_list()
2244 list_add_tail(&smbdirect_mr->list, &info->mr_list); in allocate_mr_list()
2245 atomic_inc(&info->mr_ready_count); in allocate_mr_list()
2247 INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work); in allocate_mr_list()
2253 list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) { in allocate_mr_list()
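
Lines 2209-2256 pre-allocate the FRMR pool at twice responder_resources, so that one half can sit in recovery while the other half backs in-flight RDMA. A condensed sketch; the scatterlist allocation, logging, and the full error unwind (the list_for_each_entry_safe() teardown at line 2253) are elided, and MR_READY is assumed as the initial state:

static int allocate_mr_list(struct smbd_connection *info)
{
	struct smbd_mr *smbdirect_mr;
	int i;

	INIT_LIST_HEAD(&info->mr_list);
	init_waitqueue_head(&info->wait_mr);
	spin_lock_init(&info->mr_list_lock);
	atomic_set(&info->mr_ready_count, 0);
	atomic_set(&info->mr_used_count, 0);
	init_waitqueue_head(&info->wait_for_mr_cleanup);

	for (i = 0; i < info->responder_resources * 2; i++) {
		smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
		if (!smbdirect_mr)
			goto cleanup;
		smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
					       info->max_frmr_depth);
		if (IS_ERR(smbdirect_mr->mr)) {
			kfree(smbdirect_mr);
			goto cleanup;
		}
		/* (sgl allocation elided) */
		smbdirect_mr->state = MR_READY;
		smbdirect_mr->conn = info;
		list_add_tail(&smbdirect_mr->list, &info->mr_list);
		atomic_inc(&info->mr_ready_count);
	}
	INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
	return 0;

cleanup:
	/* (free the partially built list; see line 2253) */
	return -ENOMEM;
}
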
2269 static struct smbd_mr *get_mr(struct smbd_connection *info) in get_mr() argument
2274 rc = wait_event_interruptible(info->wait_mr, in get_mr()
2275 atomic_read(&info->mr_ready_count) || in get_mr()
2276 info->transport_status != SMBD_CONNECTED); in get_mr()
2282 if (info->transport_status != SMBD_CONNECTED) { in get_mr()
2284 info->transport_status); in get_mr()
2288 spin_lock(&info->mr_list_lock); in get_mr()
2289 list_for_each_entry(ret, &info->mr_list, list) { in get_mr()
2292 spin_unlock(&info->mr_list_lock); in get_mr()
2293 atomic_dec(&info->mr_ready_count); in get_mr()
2294 atomic_inc(&info->mr_used_count); in get_mr()
2299 spin_unlock(&info->mr_list_lock); in get_mr()
2317 struct smbd_connection *info, struct page *pages[], int num_pages, in smbd_register_mr() argument
2325 if (num_pages > info->max_frmr_depth) { in smbd_register_mr()
2327 num_pages, info->max_frmr_depth); in smbd_register_mr()
2331 smbdirect_mr = get_mr(info); in smbd_register_mr()
2362 rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgl, num_pages, dir); in smbd_register_mr()
2397 rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL); in smbd_register_mr()
2406 ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgl, in smbd_register_mr()
2411 if (atomic_dec_and_test(&info->mr_used_count)) in smbd_register_mr()
2412 wake_up(&info->wait_for_mr_cleanup); in smbd_register_mr()
2414 smbd_disconnect_rdma_connection(info); in smbd_register_mr()
2443 struct smbd_connection *info = smbdirect_mr->conn; in smbd_deregister_mr() local
2457 rc = ib_post_send(info->id->qp, wr, NULL); in smbd_deregister_mr()
2460 smbd_disconnect_rdma_connection(info); in smbd_deregister_mr()
2474 info->id->device, smbdirect_mr->sgl, in smbd_deregister_mr()
2478 if (atomic_inc_return(&info->mr_ready_count) == 1) in smbd_deregister_mr()
2479 wake_up_interruptible(&info->wait_mr); in smbd_deregister_mr()
2485 queue_work(info->workqueue, &info->mr_recovery_work); in smbd_deregister_mr()
2488 if (atomic_dec_and_test(&info->mr_used_count)) in smbd_deregister_mr()
2489 wake_up(&info->wait_for_mr_cleanup); in smbd_deregister_mr()