Lines matching refs:cdev

63 ret = chtls_listen_start(clisten->cdev, clisten->sk); in listen_notify_handler()
67 chtls_listen_stop(clisten->cdev, clisten->sk); in listen_notify_handler()
86 static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk) in chtls_start_listen() argument
101 clisten->cdev = cdev; in chtls_start_listen()
110 static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk) in chtls_stop_listen() argument
120 clisten->cdev = cdev; in chtls_stop_listen()
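
The two listen paths above share one pattern: package the (cdev, sk) pair into a small heap-allocated context, then let a notifier chain deliver it to listen_notify_handler(), which dispatches on the event and frees the context. A minimal sketch of that pattern, assuming a blocking notifier chain and hypothetical names (demo_listen_list, struct listen_ctx, DEMO_LISTEN_START/STOP); the driver's own event codes and context struct differ:

#include <linux/notifier.h>
#include <linux/slab.h>
#include <net/sock.h>

/* Hypothetical event codes and context; driver types (struct chtls_dev,
 * chtls_listen_start/stop) are assumed to be in scope. */
enum { DEMO_LISTEN_START, DEMO_LISTEN_STOP };

struct listen_ctx {
	struct chtls_dev *cdev;
	struct sock *sk;
};

static BLOCKING_NOTIFIER_HEAD(demo_listen_list);

static int demo_listen_notify(struct notifier_block *this,
			      unsigned long event, void *data)
{
	struct listen_ctx *ctx = data;

	switch (event) {
	case DEMO_LISTEN_START:
		chtls_listen_start(ctx->cdev, ctx->sk);
		break;
	case DEMO_LISTEN_STOP:
		chtls_listen_stop(ctx->cdev, ctx->sk);
		break;
	}
	kfree(ctx);		/* context is consumed by the handler */
	return NOTIFY_DONE;
}

static struct notifier_block demo_listen_nb = {
	.notifier_call = demo_listen_notify,
};

/* Caller side, registered earlier with
 * blocking_notifier_chain_register(&demo_listen_list, &demo_listen_nb): */
static int demo_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
	struct listen_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	ctx->cdev = cdev;
	ctx->sk = sk;
	return blocking_notifier_call_chain(&demo_listen_list,
					    DEMO_LISTEN_START, ctx);
}
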
131 struct chtls_dev *cdev; in chtls_inline_feature() local
134 cdev = to_chtls_dev(dev); in chtls_inline_feature()
136 for (i = 0; i < cdev->lldi->nports; i++) { in chtls_inline_feature()
137 netdev = cdev->ports[i]; in chtls_inline_feature()
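
chtls_inline_feature() recovers the driver-private device from the embedded tls_toe_device via to_chtls_dev() and then walks the adapter's ports looking for one that advertises inline-TLS support. A sketch of the container_of idiom behind it, assuming the embedded member is named tlsdev and that the driver's struct chtls_dev is in scope:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/tls_toe.h>

static inline struct chtls_dev *to_chtls_dev(struct tls_toe_device *tlsdev)
{
	/* Map a pointer to the embedded member back to its container. */
	return container_of(tlsdev, struct chtls_dev, tlsdev);
}

static int demo_inline_feature(struct tls_toe_device *dev)
{
	struct chtls_dev *cdev = to_chtls_dev(dev);
	struct net_device *netdev;
	int i;

	for (i = 0; i < cdev->lldi->nports; i++) {
		netdev = cdev->ports[i];
		if (netdev->features & NETIF_F_HW_TLS_RECORD)
			return 1;	/* at least one port does inline TLS */
	}
	return 0;
}
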
146 struct chtls_dev *cdev = to_chtls_dev(dev); in chtls_create_hash() local
149 return chtls_start_listen(cdev, sk); in chtls_create_hash()
155 struct chtls_dev *cdev = to_chtls_dev(dev); in chtls_destroy_hash() local
158 chtls_stop_listen(cdev, sk); in chtls_destroy_hash()
161 static void chtls_free_uld(struct chtls_dev *cdev) in chtls_free_uld() argument
165 tls_toe_unregister_device(&cdev->tlsdev); in chtls_free_uld()
166 kvfree(cdev->kmap.addr); in chtls_free_uld()
167 idr_destroy(&cdev->hwtid_idr); in chtls_free_uld()
169 kfree_skb(cdev->rspq_skb_cache[i]); in chtls_free_uld()
170 kfree(cdev->lldi); in chtls_free_uld()
171 kfree_skb(cdev->askb); in chtls_free_uld()
172 kfree(cdev); in chtls_free_uld()
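
chtls_free_uld() unwinds what chtls_uld_add() built, including idr_destroy() on the hwtid IDR that maps hardware tids to per-connection state (see idr_init() further down the listing). A minimal sketch of that IDR lifecycle with hypothetical names (demo_map, demo_insert, demo_remove):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(demo_map);
static DEFINE_SPINLOCK(demo_idr_lock);

/* Store @data, returning a small non-negative id (or -errno). */
static int demo_insert(void *data)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&demo_idr_lock);
	id = idr_alloc(&demo_map, data, 0, 0, GFP_NOWAIT);
	spin_unlock_bh(&demo_idr_lock);
	idr_preload_end();
	return id;
}

static void demo_remove(int id)
{
	spin_lock_bh(&demo_idr_lock);
	idr_remove(&demo_map, id);
	spin_unlock_bh(&demo_idr_lock);
}

/* Teardown: all ids must have been removed first, then: */
static void demo_destroy(void)
{
	idr_destroy(&demo_map);
}
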
178 struct chtls_dev *cdev; in chtls_dev_release() local
182 cdev = to_chtls_dev(dev); in chtls_dev_release()
185 adap = pci_get_drvdata(cdev->pdev); in chtls_dev_release()
189 chtls_free_uld(cdev); in chtls_dev_release()
192 static void chtls_register_dev(struct chtls_dev *cdev) in chtls_register_dev() argument
194 struct tls_toe_device *tlsdev = &cdev->tlsdev; in chtls_register_dev()
197 strlcat(tlsdev->name, cdev->lldi->ports[0]->name, in chtls_register_dev()
205 cdev->cdev_state = CHTLS_CDEV_STATE_UP; in chtls_register_dev()
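
chtls_register_dev() names the TOE device after the adapter's first port, fills in the tls_toe callbacks shown earlier in this listing, takes the initial kref, registers the device, and only then marks the cdev up. A sketch consistent with those lines, assuming the tls_toe_device fields (name, feature, hash, unhash, release, kref) from include/net/tls_toe.h; the exact name prefix is illustrative:

#include <net/tls_toe.h>
#include <linux/string.h>
#include <linux/kref.h>

static void demo_register_dev(struct chtls_dev *cdev)
{
	struct tls_toe_device *tlsdev = &cdev->tlsdev;

	/* e.g. "chtls" + first port name */
	strscpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
	strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
		TLS_TOE_DEVICE_NAME_MAX);

	tlsdev->feature = chtls_inline_feature;
	tlsdev->hash = chtls_create_hash;
	tlsdev->unhash = chtls_destroy_hash;
	tlsdev->release = chtls_dev_release;

	/* Initial reference; the last kref_put(..., tlsdev->release)
	 * triggers chtls_dev_release() and frees the cdev. */
	kref_init(&tlsdev->kref);
	tls_toe_register_device(tlsdev);
	cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}
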
210 struct chtls_dev *cdev = container_of(task_param, in process_deferq() local
214 spin_lock_bh(&cdev->deferq.lock); in process_deferq()
215 while ((skb = __skb_dequeue(&cdev->deferq)) != NULL) { in process_deferq()
216 spin_unlock_bh(&cdev->deferq.lock); in process_deferq()
217 DEFERRED_SKB_CB(skb)->handler(cdev, skb); in process_deferq()
218 spin_lock_bh(&cdev->deferq.lock); in process_deferq()
220 spin_unlock_bh(&cdev->deferq.lock); in process_deferq()
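
process_deferq() drains the deferq with the standard unlock-around-callback idiom: take the queue lock, pop one skb, drop the lock while the deferred handler runs (it may sleep or queue more work), then retake the lock for the next iteration. A self-contained sketch, with the handler stashed in skb->cb the way DEFERRED_SKB_CB() does (demo names):

#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct demo_skb_cb {
	void (*handler)(struct chtls_dev *cdev, struct sk_buff *skb);
};
#define DEMO_SKB_CB(skb) ((struct demo_skb_cb *)(skb)->cb)

static void demo_process_deferq(struct work_struct *task_param)
{
	struct chtls_dev *cdev = container_of(task_param, struct chtls_dev,
					      deferq_task);
	struct sk_buff *skb;

	spin_lock_bh(&cdev->deferq.lock);
	while ((skb = __skb_dequeue(&cdev->deferq)) != NULL) {
		/* Drop the lock: the handler may block or requeue. */
		spin_unlock_bh(&cdev->deferq.lock);
		DEMO_SKB_CB(skb)->handler(cdev, skb);
		spin_lock_bh(&cdev->deferq.lock);
	}
	spin_unlock_bh(&cdev->deferq.lock);
}

/* Producer: record the handler, queue the skb, kick the work item
 * (INIT_WORK(&cdev->deferq_task, demo_process_deferq) at setup). */
static void demo_defer_skb(struct chtls_dev *cdev, struct sk_buff *skb,
			   void (*fn)(struct chtls_dev *, struct sk_buff *))
{
	DEMO_SKB_CB(skb)->handler = fn;
	spin_lock_bh(&cdev->deferq.lock);
	__skb_queue_tail(&cdev->deferq, skb);
	spin_unlock_bh(&cdev->deferq.lock);
	schedule_work(&cdev->deferq_task);
}
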
223 static int chtls_get_skb(struct chtls_dev *cdev) in chtls_get_skb() argument
225 cdev->askb = alloc_skb(sizeof(struct tcphdr), GFP_KERNEL); in chtls_get_skb()
226 if (!cdev->askb) in chtls_get_skb()
229 skb_put(cdev->askb, sizeof(struct tcphdr)); in chtls_get_skb()
230 skb_reset_transport_header(cdev->askb); in chtls_get_skb()
231 memset(cdev->askb->data, 0, cdev->askb->len); in chtls_get_skb()
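
chtls_get_skb() preallocates one zeroed, TCP-header-sized skb (cdev->askb) at init time so a buffer is guaranteed to exist later on paths where allocation could fail at an awkward moment. The recipe, with the missing error check filled in as a sketch:

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/string.h>

static int demo_get_skb(struct chtls_dev *cdev)
{
	cdev->askb = alloc_skb(sizeof(struct tcphdr), GFP_KERNEL);
	if (!cdev->askb)
		return -ENOMEM;

	/* Claim the space, mark it as the transport header, zero it. */
	skb_put(cdev->askb, sizeof(struct tcphdr));
	skb_reset_transport_header(cdev->askb);
	memset(cdev->askb->data, 0, cdev->askb->len);
	return 0;
}
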
238 struct chtls_dev *cdev; in chtls_uld_add() local
241 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); in chtls_uld_add()
242 if (!cdev) in chtls_uld_add()
249 if (chtls_get_skb(cdev)) in chtls_uld_add()
253 cdev->lldi = lldi; in chtls_uld_add()
254 cdev->pdev = lldi->pdev; in chtls_uld_add()
255 cdev->tids = lldi->tids; in chtls_uld_add()
256 cdev->ports = lldi->ports; in chtls_uld_add()
257 cdev->mtus = lldi->mtus; in chtls_uld_add()
258 cdev->tids = lldi->tids; in chtls_uld_add()
259 cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0])) in chtls_uld_add()
265 cdev->rspq_skb_cache[i] = __alloc_skb(size, in chtls_uld_add()
268 if (unlikely(!cdev->rspq_skb_cache[i])) in chtls_uld_add()
272 idr_init(&cdev->hwtid_idr); in chtls_uld_add()
273 INIT_WORK(&cdev->deferq_task, process_deferq); in chtls_uld_add()
274 spin_lock_init(&cdev->listen_lock); in chtls_uld_add()
275 spin_lock_init(&cdev->idr_lock); in chtls_uld_add()
276 cdev->send_page_order = min_t(uint, get_order(32768), in chtls_uld_add()
278 cdev->max_host_sndbuf = 48 * 1024; in chtls_uld_add()
281 if (chtls_init_kmap(cdev, lldi)) in chtls_uld_add()
285 list_add_tail(&cdev->list, &cdev_list); in chtls_uld_add()
288 return cdev; in chtls_uld_add()
291 kfree_skb(cdev->rspq_skb_cache[j]); in chtls_uld_add()
292 kfree_skb(cdev->askb); in chtls_uld_add()
296 kfree(cdev); in chtls_uld_add()
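
chtls_uld_add() uses the kernel's goto-ladder convention for error unwinding: each acquisition gets a label in a cleanup ladder at the bottom, and a failure jumps to the label that releases everything acquired so far, in reverse order; note how the error path above frees only rspq_skb_cache[0..i-1]. A condensed sketch of the shape (cache size, skb length, and labels are illustrative):

#include <linux/slab.h>
#include <linux/skbuff.h>

#define DEMO_SKB_CACHE 4	/* stand-in for the driver's cache size */

static struct chtls_dev *demo_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct chtls_dev *cdev;
	int i, j;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		goto out;

	if (demo_get_skb(cdev))			/* askb, see sketch above */
		goto out_skb;

	for (i = 0; i < DEMO_SKB_CACHE; i++) {
		cdev->rspq_skb_cache[i] = alloc_skb(512, GFP_KERNEL);
		if (!cdev->rspq_skb_cache[i])
			goto out_rspq_skb;
	}
	return cdev;

out_rspq_skb:
	for (j = 0; j < i; j++)		/* free only what was allocated */
		kfree_skb(cdev->rspq_skb_cache[j]);
	kfree_skb(cdev->askb);
out_skb:
	kfree(cdev);
out:
	return NULL;
}
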
303 struct chtls_dev *cdev, *tmp; in chtls_free_all_uld() local
306 list_for_each_entry_safe(cdev, tmp, &cdev_list, list) { in chtls_free_all_uld()
307 if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) { in chtls_free_all_uld()
308 list_del(&cdev->list); in chtls_free_all_uld()
309 kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release); in chtls_free_all_uld()
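
chtls_free_all_uld() iterates with list_for_each_entry_safe(), which keeps a lookahead pointer (tmp) so the current entry can be unlinked, and possibly freed by the final kref_put(), without breaking the walk. Sketch, with a hypothetical mutex guarding the list:

#include <linux/list.h>
#include <linux/kref.h>
#include <linux/mutex.h>

static LIST_HEAD(demo_cdev_list);
static DEFINE_MUTEX(demo_cdev_mutex);

static void demo_free_all_uld(void)
{
	struct chtls_dev *cdev, *tmp;

	mutex_lock(&demo_cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &demo_cdev_list, list) {
		if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
			list_del(&cdev->list);
			/* The final put calls cdev->tlsdev.release, i.e.
			 * chtls_dev_release() -> chtls_free_uld(). */
			kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
		}
	}
	mutex_unlock(&demo_cdev_mutex);
}
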
317 struct chtls_dev *cdev = handle; in chtls_uld_state_change() local
321 chtls_register_dev(cdev); in chtls_uld_state_change()
329 list_del(&cdev->list); in chtls_uld_state_change()
331 kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release); in chtls_uld_state_change()
364 static int chtls_recv_packet(struct chtls_dev *cdev, in chtls_recv_packet() argument
371 skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift); in chtls_recv_packet()
375 ret = chtls_handlers[opcode](cdev, skb); in chtls_recv_packet()
382 static int chtls_recv_rsp(struct chtls_dev *cdev, const __be64 *rsp) in chtls_recv_rsp() argument
394 skb = cdev->rspq_skb_cache[rspq_bin]; in chtls_recv_rsp()
414 ret = chtls_handlers[opcode](cdev, skb); in chtls_recv_rsp()
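
chtls_recv_rsp() hashes small control-plane responses into one of a few cached skbs (rspq_skb_cache, indexed by rspq_bin) and reuses the cached buffer only when no one else holds it; otherwise it falls back to a fresh allocation. A simplified, hypothetical sketch of that reuse test; the real code additionally rechecks skb->users after the increment to close a race:

#include <linux/skbuff.h>

static struct sk_buff *demo_get_rsp_skb(struct chtls_dev *cdev,
					unsigned int rspq_bin,
					unsigned int len)
{
	struct sk_buff *skb = cdev->rspq_skb_cache[rspq_bin];

	if (skb && !skb_is_nonlinear(skb) &&
	    !skb_shared(skb) && !skb_cloned(skb)) {
		__skb_trim(skb, 0);		/* reset length, keep buffer */
		refcount_inc(&skb->users);	/* survive handler's kfree_skb() */
		return skb;
	}
	return alloc_skb(len, GFP_ATOMIC);	/* cache busy: allocate */
}
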
421 static void chtls_recv(struct chtls_dev *cdev, in chtls_recv() argument
433 ret = chtls_handlers[opcode](cdev, skb); in chtls_recv()
441 struct chtls_dev *cdev = handle; in chtls_uld_rx_handler() local
448 if (chtls_recv_packet(cdev, gl, rsp) < 0) in chtls_uld_rx_handler()
454 return chtls_recv_rsp(cdev, rsp); in chtls_uld_rx_handler()
460 chtls_recv(cdev, &skb, rsp); in chtls_uld_rx_handler()
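
All three receive paths (chtls_recv_packet, chtls_recv_rsp, chtls_recv) funnel into the same chtls_handlers[] table: the first byte of the firmware response is a CPL opcode used to index an array of handler functions. A sketch of that dispatch-table pattern (demo names; the opcode values are illustrative, not real CPL numbers):

#include <linux/skbuff.h>

typedef int (*demo_handler_func)(struct chtls_dev *cdev,
				 struct sk_buff *skb);

static int demo_act_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	kfree_skb(skb);		/* stub: consume and succeed */
	return 0;
}

static int demo_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	kfree_skb(skb);		/* stub */
	return 0;
}

/* Indexed by opcode; unhandled opcodes stay NULL. */
static demo_handler_func demo_handlers[256] = {
	[0x21] = demo_act_open_rpl,
	[0x3a] = demo_abort_rpl,
};

static int demo_dispatch(struct chtls_dev *cdev, struct sk_buff *skb,
			 const __be64 *rsp)
{
	u8 opcode = *(u8 *)rsp;	/* opcode is the first byte of the rsp */

	if (!demo_handlers[opcode]) {
		kfree_skb(skb);
		return -EOPNOTSUPP;
	}
	return demo_handlers[opcode](cdev, skb);
}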