Lines Matching refs:ch
228 static int riocm_ch_close(struct rio_channel *ch);
248 static int riocm_cmp(struct rio_channel *ch, enum rio_cm_state cmp) in riocm_cmp() argument
252 spin_lock_bh(&ch->lock); in riocm_cmp()
253 ret = (ch->state == cmp); in riocm_cmp()
254 spin_unlock_bh(&ch->lock); in riocm_cmp()
258 static int riocm_cmp_exch(struct rio_channel *ch, in riocm_cmp_exch() argument
263 spin_lock_bh(&ch->lock); in riocm_cmp_exch()
264 ret = (ch->state == cmp); in riocm_cmp_exch()
266 ch->state = exch; in riocm_cmp_exch()
267 spin_unlock_bh(&ch->lock); in riocm_cmp_exch()
271 static enum rio_cm_state riocm_exch(struct rio_channel *ch, in riocm_exch() argument
276 spin_lock_bh(&ch->lock); in riocm_exch()
277 old = ch->state; in riocm_exch()
278 ch->state = exch; in riocm_exch()
279 spin_unlock_bh(&ch->lock); in riocm_exch()
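
The three helpers listed above guard every state transition with ch->lock. Below is a minimal sketch of that pattern, reconstructed from the fragments shown here; cm_ch is a reduced stand-in for struct rio_channel carrying only the fields visible in this listing, and RIOCM_RX_RING_SIZE, the enum ordering and the field types are assumptions. Later sketches in this listing reuse these definitions.

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/list.h>

#define RIOCM_RX_RING_SIZE 128          /* assumed value */

enum rio_cm_state {                     /* state names taken from the listing */
        RIO_CM_IDLE, RIO_CM_CONNECT, RIO_CM_CONNECTED, RIO_CM_DISCONNECT,
        RIO_CM_CHAN_BOUND, RIO_CM_LISTEN, RIO_CM_DESTROYING,
};

struct cm_ch {                          /* reduced stand-in for struct rio_channel */
        u16 id;                         /* local channel number */
        u16 rem_channel;                /* peer's channel number */
        u32 loc_destid;                 /* local RapidIO destination ID */
        u32 rem_destid;                 /* peer's RapidIO destination ID */
        enum rio_cm_state state;
        spinlock_t lock;                /* protects state, rx_ring, accept_queue */
        struct kref ref;                /* lifetime; release hook sketched later */
        struct completion comp;         /* rx data / connect / accept wakeups */
        struct completion comp_close;   /* signalled once the channel is released */
        struct list_head accept_queue;  /* pending connection requests */
        struct list_head ch_node;       /* used to collect channels at teardown */
        struct {
                void *buf[RIOCM_RX_RING_SIZE];   /* queued inbound messages */
                void *inuse[RIOCM_RX_RING_SIZE]; /* buffers handed to the reader */
                int head, tail, count, inuse_cnt;
        } rx_ring;
};

/* Mirrors riocm_cmp(): test the channel state under the lock. */
static int ch_cmp(struct cm_ch *ch, enum rio_cm_state cmp)
{
        int ret;

        spin_lock_bh(&ch->lock);
        ret = (ch->state == cmp);
        spin_unlock_bh(&ch->lock);
        return ret;
}

/* Mirrors riocm_cmp_exch(): move to @exch only if currently in @cmp. */
static int ch_cmp_exch(struct cm_ch *ch, enum rio_cm_state cmp,
                       enum rio_cm_state exch)
{
        int ret;

        spin_lock_bh(&ch->lock);
        ret = (ch->state == cmp);
        if (ret)
                ch->state = exch;
        spin_unlock_bh(&ch->lock);
        return ret;
}

/* Mirrors riocm_exch(): unconditionally switch to @exch, returning the old state. */
static enum rio_cm_state ch_exch(struct cm_ch *ch, enum rio_cm_state exch)
{
        enum rio_cm_state old;

        spin_lock_bh(&ch->lock);
        old = ch->state;
        ch->state = exch;
        spin_unlock_bh(&ch->lock);
        return old;
}
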
285 struct rio_channel *ch; in riocm_get_channel() local
288 ch = idr_find(&ch_idr, nr); in riocm_get_channel()
289 if (ch) in riocm_get_channel()
290 kref_get(&ch->ref); in riocm_get_channel()
292 return ch; in riocm_get_channel()
295 static void riocm_put_channel(struct rio_channel *ch) in riocm_put_channel() argument
297 kref_put(&ch->ref, riocm_ch_free); in riocm_put_channel()
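
Channel lookup pairs an idr_find() with a kref_get() so the channel cannot be freed while a handler is still using it; riocm_put_channel() drops that reference. A sketch of the pair, assuming a global ch_idr guarded by an idr_lock spinlock (the lock name is an assumption; the listing shows only the idr_find() call):

#include <linux/idr.h>

static DEFINE_IDR(ch_idr);
static DEFINE_SPINLOCK(idr_lock);       /* lock name is an assumption */

static void ch_release(struct kref *ref);  /* defined in the riocm_ch_free() sketch below */

/* Mirrors riocm_get_channel(): look up a channel and take a reference. */
static struct cm_ch *ch_get(u16 nr)
{
        struct cm_ch *ch;

        spin_lock_bh(&idr_lock);
        ch = idr_find(&ch_idr, nr);
        if (ch)
                kref_get(&ch->ref);
        spin_unlock_bh(&idr_lock);
        return ch;
}

/* Mirrors riocm_put_channel(): the last put runs the release hook. */
static void ch_put(struct cm_ch *ch)
{
        kref_put(&ch->ref, ch_release);
}
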
378 struct rio_channel *ch; in riocm_req_handler() local
385 ch = riocm_get_channel(chnum); in riocm_req_handler()
387 if (!ch) in riocm_req_handler()
390 if (ch->state != RIO_CM_LISTEN) { in riocm_req_handler()
392 riocm_put_channel(ch); in riocm_req_handler()
398 riocm_put_channel(ch); in riocm_req_handler()
406 spin_lock_bh(&ch->lock); in riocm_req_handler()
407 list_add_tail(&req->node, &ch->accept_queue); in riocm_req_handler()
408 spin_unlock_bh(&ch->lock); in riocm_req_handler()
409 complete(&ch->comp); in riocm_req_handler()
410 riocm_put_channel(ch); in riocm_req_handler()
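
riocm_req_handler() hands an incoming connection request to a listening channel and wakes whoever is blocked in accept. A sketch of that handoff; struct conn_req and its node member appear in the listing, but the destid/chan fields and the error codes here are assumptions for illustration:

#include <linux/errno.h>

struct conn_req {                       /* assumed layout of a queued request */
        struct list_head node;
        u32 destid;                     /* requesting peer's destination ID */
        u16 chan;                       /* requesting peer's channel number */
};

/* Queue @req on a listening channel and wake one accept() waiter. */
static int ch_queue_conn_req(u16 chnum, struct conn_req *req)
{
        struct cm_ch *ch = ch_get(chnum);

        if (!ch)
                return -ENODEV;         /* no such local channel */

        if (ch->state != RIO_CM_LISTEN) {
                ch_put(ch);
                return -EINVAL;         /* nobody is listening here */
        }

        spin_lock_bh(&ch->lock);
        list_add_tail(&req->node, &ch->accept_queue);
        spin_unlock_bh(&ch->lock);
        complete(&ch->comp);            /* wake riocm_ch_accept() */
        ch_put(ch);
        return 0;
}
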
425 struct rio_channel *ch; in riocm_resp_handler() local
430 ch = riocm_get_channel(chnum); in riocm_resp_handler()
431 if (!ch) in riocm_resp_handler()
434 if (ch->state != RIO_CM_CONNECT) { in riocm_resp_handler()
435 riocm_put_channel(ch); in riocm_resp_handler()
439 riocm_exch(ch, RIO_CM_CONNECTED); in riocm_resp_handler()
440 ch->rem_channel = ntohs(hh->src_ch); in riocm_resp_handler()
441 complete(&ch->comp); in riocm_resp_handler()
442 riocm_put_channel(ch); in riocm_resp_handler()
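
riocm_resp_handler() completes an outgoing connect: it flips CONNECT to CONNECTED, records the peer's channel number from the received header, and wakes the waiter. A sketch reusing the helpers above; peer_ch corresponds to ntohs(hh->src_ch) in the listing, and the error codes are illustrative:

/* Mark a connecting channel as established and record the peer's channel. */
static int ch_handle_conn_resp(u16 chnum, u16 peer_ch)
{
        struct cm_ch *ch = ch_get(chnum);

        if (!ch)
                return -ENODEV;

        if (ch->state != RIO_CM_CONNECT) {
                ch_put(ch);
                return -EINVAL;         /* stale or unexpected response */
        }

        ch_exch(ch, RIO_CM_CONNECTED);
        ch->rem_channel = peer_ch;      /* ntohs(hh->src_ch) in the listing */
        complete(&ch->comp);            /* wake riocm_ch_connect() */
        ch_put(ch);
        return 0;
}
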
457 struct rio_channel *ch; in riocm_close_handler() local
464 ch = idr_find(&ch_idr, ntohs(hh->dst_ch)); in riocm_close_handler()
465 if (!ch) { in riocm_close_handler()
469 idr_remove(&ch_idr, ch->id); in riocm_close_handler()
472 riocm_exch(ch, RIO_CM_DISCONNECT); in riocm_close_handler()
474 ret = riocm_ch_close(ch); in riocm_close_handler()
529 struct rio_channel *ch; in rio_rx_data_handler() local
535 ch = riocm_get_channel(ntohs(hdr->dst_ch)); in rio_rx_data_handler()
536 if (!ch) { in rio_rx_data_handler()
543 spin_lock(&ch->lock); in rio_rx_data_handler()
545 if (ch->state != RIO_CM_CONNECTED) { in rio_rx_data_handler()
548 ch->id, ch->state); in rio_rx_data_handler()
549 spin_unlock(&ch->lock); in rio_rx_data_handler()
551 riocm_put_channel(ch); in rio_rx_data_handler()
555 if (ch->rx_ring.count == RIOCM_RX_RING_SIZE) { in rio_rx_data_handler()
557 riocm_debug(RX_DATA, "ch=%d is full", ch->id); in rio_rx_data_handler()
558 spin_unlock(&ch->lock); in rio_rx_data_handler()
560 riocm_put_channel(ch); in rio_rx_data_handler()
564 ch->rx_ring.buf[ch->rx_ring.head] = buf; in rio_rx_data_handler()
565 ch->rx_ring.head++; in rio_rx_data_handler()
566 ch->rx_ring.count++; in rio_rx_data_handler()
567 ch->rx_ring.head %= RIOCM_RX_RING_SIZE; in rio_rx_data_handler()
569 complete(&ch->comp); in rio_rx_data_handler()
571 spin_unlock(&ch->lock); in rio_rx_data_handler()
572 riocm_put_channel(ch); in rio_rx_data_handler()
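
rio_rx_data_handler() parks each inbound buffer in a fixed-size ring and wakes the reader; buffers arriving for a missing, non-connected or full channel are dropped. A sketch of the enqueue step, assuming the caller frees the buffer when a negative value comes back (error codes are illustrative):

/* Queue one inbound message buffer on the channel's RX ring. */
static int ch_rx_enqueue(u16 chnum, void *buf)
{
        struct cm_ch *ch = ch_get(chnum);

        if (!ch)
                return -ENODEV;         /* message for a dead channel */

        spin_lock(&ch->lock);           /* handler context: plain spin_lock */

        if (ch->state != RIO_CM_CONNECTED ||
            ch->rx_ring.count == RIOCM_RX_RING_SIZE) {
                spin_unlock(&ch->lock);
                ch_put(ch);
                return -ENOSPC;         /* drop: not connected or ring full */
        }

        ch->rx_ring.buf[ch->rx_ring.head] = buf;
        ch->rx_ring.head = (ch->rx_ring.head + 1) % RIOCM_RX_RING_SIZE;
        ch->rx_ring.count++;
        complete(&ch->comp);            /* wake a blocked riocm_ch_receive() */

        spin_unlock(&ch->lock);
        ch_put(ch);
        return 0;
}
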
783 struct rio_channel *ch; in riocm_ch_send() local
790 ch = riocm_get_channel(ch_id); in riocm_ch_send()
791 if (!ch) { in riocm_ch_send()
797 if (!riocm_cmp(ch, RIO_CM_CONNECTED)) { in riocm_ch_send()
807 hdr->bhdr.src_id = htonl(ch->loc_destid); in riocm_ch_send()
808 hdr->bhdr.dst_id = htonl(ch->rem_destid); in riocm_ch_send()
813 hdr->dst_ch = htons(ch->rem_channel); in riocm_ch_send()
814 hdr->src_ch = htons(ch->id); in riocm_ch_send()
823 ret = riocm_post_send(ch->cmdev, ch->rdev, buf, len); in riocm_ch_send()
825 riocm_debug(TX, "ch %d send_err=%d", ch->id, ret); in riocm_ch_send()
827 riocm_put_channel(ch); in riocm_ch_send()
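
riocm_ch_send() stamps every outbound buffer with the endpoint and channel IDs cached in the channel before posting it to the mport. A sketch of the header fill; the cm_msg_hdr layout below is a reduced assumption, not the real rio_ch_chan_hdr:

struct cm_msg_hdr {                     /* reduced, assumed header layout */
        struct {
                __be32 src_id;          /* sender's RapidIO destination ID */
                __be32 dst_id;          /* receiver's RapidIO destination ID */
        } bhdr;
        __be16 src_ch;                  /* sending channel number */
        __be16 dst_ch;                  /* receiving channel number */
};

/* Fill the addressing part of an outbound message from channel state. */
static void ch_fill_tx_hdr(struct cm_ch *ch, struct cm_msg_hdr *hdr)
{
        hdr->bhdr.src_id = htonl(ch->loc_destid);
        hdr->bhdr.dst_id = htonl(ch->rem_destid);
        hdr->dst_ch = htons(ch->rem_channel);
        hdr->src_ch = htons(ch->id);
        /* the real code then calls riocm_post_send(ch->cmdev, ch->rdev, ...) */
}
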
831 static int riocm_ch_free_rxbuf(struct rio_channel *ch, void *buf) in riocm_ch_free_rxbuf() argument
835 spin_lock_bh(&ch->lock); in riocm_ch_free_rxbuf()
838 if (ch->rx_ring.inuse[i] == buf) { in riocm_ch_free_rxbuf()
839 ch->rx_ring.inuse[i] = NULL; in riocm_ch_free_rxbuf()
840 ch->rx_ring.inuse_cnt--; in riocm_ch_free_rxbuf()
846 spin_unlock_bh(&ch->lock); in riocm_ch_free_rxbuf()
866 static int riocm_ch_receive(struct rio_channel *ch, void **buf, long timeout) in riocm_ch_receive() argument
872 if (!riocm_cmp(ch, RIO_CM_CONNECTED)) { in riocm_ch_receive()
877 if (ch->rx_ring.inuse_cnt == RIOCM_RX_RING_SIZE) { in riocm_ch_receive()
885 wret = wait_for_completion_interruptible_timeout(&ch->comp, timeout); in riocm_ch_receive()
887 riocm_debug(WAIT, "wait on %d returned %ld", ch->id, wret); in riocm_ch_receive()
894 ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -ECONNRESET; in riocm_ch_receive()
899 spin_lock_bh(&ch->lock); in riocm_ch_receive()
901 rxmsg = ch->rx_ring.buf[ch->rx_ring.tail]; in riocm_ch_receive()
902 ch->rx_ring.buf[ch->rx_ring.tail] = NULL; in riocm_ch_receive()
903 ch->rx_ring.count--; in riocm_ch_receive()
904 ch->rx_ring.tail++; in riocm_ch_receive()
905 ch->rx_ring.tail %= RIOCM_RX_RING_SIZE; in riocm_ch_receive()
909 if (ch->rx_ring.inuse[i] == NULL) { in riocm_ch_receive()
910 ch->rx_ring.inuse[i] = rxmsg; in riocm_ch_receive()
911 ch->rx_ring.inuse_cnt++; in riocm_ch_receive()
923 spin_unlock_bh(&ch->lock); in riocm_ch_receive()
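
riocm_ch_receive() waits on ch->comp (after checking that the channel is connected and the inuse table still has room), then pulls the oldest buffer from the ring and parks it in inuse[] until riocm_ch_free_rxbuf() returns it. A sketch of the dequeue step, which runs with ch->lock held:

/* Pop the oldest queued RX buffer and park it in the inuse[] table. */
static void *ch_rx_dequeue_locked(struct cm_ch *ch)
{
        void *rxmsg;
        int i;

        rxmsg = ch->rx_ring.buf[ch->rx_ring.tail];
        ch->rx_ring.buf[ch->rx_ring.tail] = NULL;
        ch->rx_ring.count--;
        ch->rx_ring.tail = (ch->rx_ring.tail + 1) % RIOCM_RX_RING_SIZE;

        /* remember the buffer so it can be reclaimed if the caller never returns it */
        for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
                if (ch->rx_ring.inuse[i] == NULL) {
                        ch->rx_ring.inuse[i] = rxmsg;
                        ch->rx_ring.inuse_cnt++;
                        break;
                }
        }
        return rxmsg;
}
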
945 struct rio_channel *ch = NULL; in riocm_ch_connect() local
950 ch = riocm_get_channel(loc_ch); in riocm_ch_connect()
951 if (!ch) in riocm_ch_connect()
954 if (!riocm_cmp_exch(ch, RIO_CM_IDLE, RIO_CM_CONNECT)) { in riocm_ch_connect()
959 ch->cmdev = cm; in riocm_ch_connect()
960 ch->rdev = peer->rdev; in riocm_ch_connect()
961 ch->context = NULL; in riocm_ch_connect()
962 ch->loc_destid = cm->mport->host_deviceid; in riocm_ch_connect()
963 ch->rem_channel = rem_ch; in riocm_ch_connect()
975 hdr->bhdr.src_id = htonl(ch->loc_destid); in riocm_ch_connect()
1000 riocm_cmp_exch(ch, RIO_CM_CONNECT, RIO_CM_IDLE); in riocm_ch_connect()
1005 wret = wait_for_completion_interruptible_timeout(&ch->comp, in riocm_ch_connect()
1007 riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret); in riocm_ch_connect()
1014 ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -1; in riocm_ch_connect()
1017 riocm_put_channel(ch); in riocm_ch_connect()
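
riocm_ch_connect() is a compare-and-exchange from IDLE to CONNECT, a request send, and a timed wait for the response handler to move the channel to CONNECTED. A condensed sketch of that sequence; send_conn_req() is a placeholder for the header build and riocm_post_send() call shown in the listing, and the error codes are assumptions:

/* Placeholder for building and posting the CONN REQ message. */
static int send_conn_req(struct cm_ch *ch) { return 0; }

/* Connect a local channel to @rem_ch on the peer; returns 0 when CONNECTED. */
static int ch_connect(u16 loc_ch, u16 rem_ch, long timeout)
{
        struct cm_ch *ch = ch_get(loc_ch);
        long wret;
        int ret;

        if (!ch)
                return -ENODEV;

        if (!ch_cmp_exch(ch, RIO_CM_IDLE, RIO_CM_CONNECT)) {
                ch_put(ch);
                return -EINVAL;         /* channel is not idle */
        }

        ch->rem_channel = rem_ch;

        ret = send_conn_req(ch);
        if (ret) {
                /* undo the state change if the request never went out */
                ch_cmp_exch(ch, RIO_CM_CONNECT, RIO_CM_IDLE);
                goto out;
        }

        /* wait for ch_handle_conn_resp() to complete ch->comp */
        wret = wait_for_completion_interruptible_timeout(&ch->comp, timeout);
        if (!wret)
                ret = -ETIME;
        else if (wret == -ERESTARTSYS)
                ret = -EINTR;
        else
                ret = ch_cmp(ch, RIO_CM_CONNECTED) ? 0 : -1;
out:
        ch_put(ch);
        return ret;
}
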
1021 static int riocm_send_ack(struct rio_channel *ch) in riocm_send_ack() argument
1030 hdr->bhdr.src_id = htonl(ch->loc_destid); in riocm_send_ack()
1031 hdr->bhdr.dst_id = htonl(ch->rem_destid); in riocm_send_ack()
1032 hdr->dst_ch = htons(ch->rem_channel); in riocm_send_ack()
1033 hdr->src_ch = htons(ch->id); in riocm_send_ack()
1043 ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr)); in riocm_send_ack()
1045 if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, in riocm_send_ack()
1046 ch->rdev, hdr, sizeof(*hdr))) in riocm_send_ack()
1052 ch->id, rio_name(ch->rdev), ret); in riocm_send_ack()
1074 struct rio_channel *ch; in riocm_ch_accept() local
1082 ch = riocm_get_channel(ch_id); in riocm_ch_accept()
1083 if (!ch) in riocm_ch_accept()
1086 if (!riocm_cmp(ch, RIO_CM_LISTEN)) { in riocm_ch_accept()
1093 if (!try_wait_for_completion(&ch->comp)) { in riocm_ch_accept()
1098 riocm_debug(WAIT, "on %d", ch->id); in riocm_ch_accept()
1100 wret = wait_for_completion_interruptible_timeout(&ch->comp, in riocm_ch_accept()
1111 spin_lock_bh(&ch->lock); in riocm_ch_accept()
1113 if (ch->state != RIO_CM_LISTEN) { in riocm_ch_accept()
1115 } else if (list_empty(&ch->accept_queue)) { in riocm_ch_accept()
1117 ch->id); in riocm_ch_accept()
1121 spin_unlock_bh(&ch->lock); in riocm_ch_accept()
1124 riocm_debug(WAIT, "on %d returns %d", ch->id, err); in riocm_ch_accept()
1138 spin_lock_bh(&ch->lock); in riocm_ch_accept()
1140 req = list_first_entry(&ch->accept_queue, struct conn_req, node); in riocm_ch_accept()
1142 new_ch->cmdev = ch->cmdev; in riocm_ch_accept()
1143 new_ch->loc_destid = ch->loc_destid; in riocm_ch_accept()
1147 spin_unlock_bh(&ch->lock); in riocm_ch_accept()
1148 riocm_put_channel(ch); in riocm_ch_accept()
1149 ch = NULL; in riocm_ch_accept()
1187 if (ch) in riocm_ch_accept()
1188 riocm_put_channel(ch); in riocm_ch_accept()
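
riocm_ch_accept() first tries try_wait_for_completion() so an already-queued request is taken without sleeping, otherwise it blocks with a timeout; after waking it re-checks the state and the queue under the lock before cloning the listener's cmdev and loc_destid into the new channel. A condensed sketch of the wait-and-dequeue step (channel creation and the ACK are omitted, and the error codes and the unlink point are assumptions):

#include <linux/err.h>

/* Wait (up to @timeout) for a connection request on a listening channel. */
static struct conn_req *ch_wait_conn_req(struct cm_ch *ch, long timeout)
{
        struct conn_req *req = NULL;
        long wret;

        if (!ch_cmp(ch, RIO_CM_LISTEN))
                return ERR_PTR(-EINVAL);

        /* fast path: a request may already be queued */
        if (!try_wait_for_completion(&ch->comp)) {
                wret = wait_for_completion_interruptible_timeout(&ch->comp,
                                                                 timeout);
                if (!wret)
                        return ERR_PTR(-ETIME);
                if (wret == -ERESTARTSYS)
                        return ERR_PTR(-EINTR);
        }

        spin_lock_bh(&ch->lock);
        if (ch->state != RIO_CM_LISTEN)
                req = ERR_PTR(-ECANCELED);      /* listener was torn down */
        else if (list_empty(&ch->accept_queue))
                req = ERR_PTR(-ENOMSG);         /* spurious wakeup */
        else
                req = list_first_entry(&ch->accept_queue,
                                       struct conn_req, node);
        if (!IS_ERR(req))
                list_del(&req->node);           /* assumption: unlinked here */
        spin_unlock_bh(&ch->lock);

        return req;
}
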
1203 struct rio_channel *ch = NULL; in riocm_ch_listen() local
1208 ch = riocm_get_channel(ch_id); in riocm_ch_listen()
1209 if (!ch) in riocm_ch_listen()
1211 if (!riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN)) in riocm_ch_listen()
1213 riocm_put_channel(ch); in riocm_ch_listen()
1230 struct rio_channel *ch = NULL; in riocm_ch_bind() local
1249 ch = riocm_get_channel(ch_id); in riocm_ch_bind()
1250 if (!ch) { in riocm_ch_bind()
1255 spin_lock_bh(&ch->lock); in riocm_ch_bind()
1256 if (ch->state != RIO_CM_IDLE) { in riocm_ch_bind()
1257 spin_unlock_bh(&ch->lock); in riocm_ch_bind()
1262 ch->cmdev = cm; in riocm_ch_bind()
1263 ch->loc_destid = cm->mport->host_deviceid; in riocm_ch_bind()
1264 ch->context = context; in riocm_ch_bind()
1265 ch->state = RIO_CM_CHAN_BOUND; in riocm_ch_bind()
1266 spin_unlock_bh(&ch->lock); in riocm_ch_bind()
1268 riocm_put_channel(ch); in riocm_ch_bind()
1285 struct rio_channel *ch; in riocm_ch_alloc() local
1287 ch = kzalloc(sizeof(*ch), GFP_KERNEL); in riocm_ch_alloc()
1288 if (!ch) in riocm_ch_alloc()
1303 id = idr_alloc_cyclic(&ch_idr, ch, start, end, GFP_NOWAIT); in riocm_ch_alloc()
1308 kfree(ch); in riocm_ch_alloc()
1312 ch->id = (u16)id; in riocm_ch_alloc()
1313 ch->state = RIO_CM_IDLE; in riocm_ch_alloc()
1314 spin_lock_init(&ch->lock); in riocm_ch_alloc()
1315 INIT_LIST_HEAD(&ch->accept_queue); in riocm_ch_alloc()
1316 INIT_LIST_HEAD(&ch->ch_node); in riocm_ch_alloc()
1317 init_completion(&ch->comp); in riocm_ch_alloc()
1318 init_completion(&ch->comp_close); in riocm_ch_alloc()
1319 kref_init(&ch->ref); in riocm_ch_alloc()
1320 ch->rx_ring.head = 0; in riocm_ch_alloc()
1321 ch->rx_ring.tail = 0; in riocm_ch_alloc()
1322 ch->rx_ring.count = 0; in riocm_ch_alloc()
1323 ch->rx_ring.inuse_cnt = 0; in riocm_ch_alloc()
1325 return ch; in riocm_ch_alloc()
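
riocm_ch_alloc() registers the new channel in ch_idr with idr_alloc_cyclic(), so recently freed channel numbers are not reused immediately; a requested non-zero number becomes a one-slot range. A sketch; the dynamic range bounds and the idr_preload() usage are assumptions:

#include <linux/slab.h>

/* Allocate a channel and register it in ch_idr under a cyclic ID. */
static struct cm_ch *ch_alloc(u16 ch_num)
{
        struct cm_ch *ch;
        int start, end, id;

        ch = kzalloc(sizeof(*ch), GFP_KERNEL);
        if (!ch)
                return ERR_PTR(-ENOMEM);

        if (ch_num) {                   /* caller asked for a specific number */
                start = ch_num;
                end = ch_num + 1;
        } else {                        /* dynamic range; bounds are assumptions */
                start = 256;
                end = 2048;
        }

        idr_preload(GFP_KERNEL);
        spin_lock_bh(&idr_lock);
        id = idr_alloc_cyclic(&ch_idr, ch, start, end, GFP_NOWAIT);
        spin_unlock_bh(&idr_lock);
        idr_preload_end();

        if (id < 0) {
                kfree(ch);
                return ERR_PTR(id);
        }

        ch->id = (u16)id;
        ch->state = RIO_CM_IDLE;
        spin_lock_init(&ch->lock);
        INIT_LIST_HEAD(&ch->accept_queue);
        INIT_LIST_HEAD(&ch->ch_node);
        init_completion(&ch->comp);
        init_completion(&ch->comp_close);
        kref_init(&ch->ref);

        return ch;
}
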
1346 struct rio_channel *ch = NULL; in riocm_ch_create() local
1348 ch = riocm_ch_alloc(*ch_num); in riocm_ch_create()
1350 if (IS_ERR(ch)) in riocm_ch_create()
1352 *ch_num, PTR_ERR(ch)); in riocm_ch_create()
1354 *ch_num = ch->id; in riocm_ch_create()
1356 return ch; in riocm_ch_create()
1365 struct rio_channel *ch = container_of(ref, struct rio_channel, ref); in riocm_ch_free() local
1368 riocm_debug(CHOP, "(ch_%d)", ch->id); in riocm_ch_free()
1370 if (ch->rx_ring.inuse_cnt) { in riocm_ch_free()
1372 i < RIOCM_RX_RING_SIZE && ch->rx_ring.inuse_cnt; i++) { in riocm_ch_free()
1373 if (ch->rx_ring.inuse[i] != NULL) { in riocm_ch_free()
1374 kfree(ch->rx_ring.inuse[i]); in riocm_ch_free()
1375 ch->rx_ring.inuse_cnt--; in riocm_ch_free()
1380 if (ch->rx_ring.count) in riocm_ch_free()
1381 for (i = 0; i < RIOCM_RX_RING_SIZE && ch->rx_ring.count; i++) { in riocm_ch_free()
1382 if (ch->rx_ring.buf[i] != NULL) { in riocm_ch_free()
1383 kfree(ch->rx_ring.buf[i]); in riocm_ch_free()
1384 ch->rx_ring.count--; in riocm_ch_free()
1388 complete(&ch->comp_close); in riocm_ch_free()
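
riocm_ch_free() is the kref release hook: it reclaims buffers the reader still holds in inuse[] as well as those still queued in the ring, then completes comp_close so a blocked riocm_ch_close() can free the structure. A sketch, filling in the ch_release() placeholder declared in the lookup sketch above:

/* kref release hook: reclaim all RX buffers and signal the closer. */
static void ch_release(struct kref *ref)
{
        struct cm_ch *ch = container_of(ref, struct cm_ch, ref);
        int i;

        /* buffers handed out to the reader but never returned */
        for (i = 0; i < RIOCM_RX_RING_SIZE && ch->rx_ring.inuse_cnt; i++) {
                if (ch->rx_ring.inuse[i]) {
                        kfree(ch->rx_ring.inuse[i]);
                        ch->rx_ring.inuse_cnt--;
                }
        }

        /* buffers still queued in the ring */
        for (i = 0; i < RIOCM_RX_RING_SIZE && ch->rx_ring.count; i++) {
                if (ch->rx_ring.buf[i]) {
                        kfree(ch->rx_ring.buf[i]);
                        ch->rx_ring.count--;
                }
        }

        complete(&ch->comp_close);
}
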
1391 static int riocm_send_close(struct rio_channel *ch) in riocm_send_close() argument
1404 hdr->bhdr.src_id = htonl(ch->loc_destid); in riocm_send_close()
1405 hdr->bhdr.dst_id = htonl(ch->rem_destid); in riocm_send_close()
1410 hdr->dst_ch = htons(ch->rem_channel); in riocm_send_close()
1411 hdr->src_ch = htons(ch->id); in riocm_send_close()
1417 ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr)); in riocm_send_close()
1419 if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, ch->rdev, in riocm_send_close()
1425 riocm_error("ch(%d) send CLOSE failed (ret=%d)", ch->id, ret); in riocm_send_close()
1434 static int riocm_ch_close(struct rio_channel *ch) in riocm_ch_close() argument
1442 ch->id, current->comm, task_pid_nr(current)); in riocm_ch_close()
1444 state = riocm_exch(ch, RIO_CM_DESTROYING); in riocm_ch_close()
1446 riocm_send_close(ch); in riocm_ch_close()
1448 complete_all(&ch->comp); in riocm_ch_close()
1450 riocm_put_channel(ch); in riocm_ch_close()
1451 wret = wait_for_completion_interruptible_timeout(&ch->comp_close, tmo); in riocm_ch_close()
1453 riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret); in riocm_ch_close()
1458 current->comm, task_pid_nr(current), ch->id); in riocm_ch_close()
1463 current->comm, task_pid_nr(current), ch->id); in riocm_ch_close()
1468 riocm_debug(CHOP, "ch_%d resources released", ch->id); in riocm_ch_close()
1469 kfree(ch); in riocm_ch_close()
1471 riocm_debug(CHOP, "failed to release ch_%d resources", ch->id); in riocm_ch_close()
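
riocm_ch_close() moves the channel to DESTROYING, notifies the peer, wakes any sleepers with complete_all(), drops its own reference and then waits on comp_close, which the release hook completes from the final kref_put(); only then is the structure freed. A condensed sketch of that ordering; the timeout value, the CLOSE condition and send_close() are placeholders and assumptions:

static int send_close(struct cm_ch *ch) { return 0; }  /* placeholder */

/* Tear down a channel and free it once the last reference is gone. */
static int ch_close(struct cm_ch *ch)
{
        unsigned long tmo = msecs_to_jiffies(3000);     /* assumed timeout */
        enum rio_cm_state state;
        long wret;

        state = ch_exch(ch, RIO_CM_DESTROYING);
        if (state == RIO_CM_CONNECTED)          /* assumption: only notify a live peer */
                send_close(ch);

        complete_all(&ch->comp);                /* unblock sleeping senders/receivers */

        ch_put(ch);                             /* drop the lookup reference */
        wret = wait_for_completion_interruptible_timeout(&ch->comp_close, tmo);
        if (wret <= 0)
                return wret ? -EINTR : -ETIMEDOUT;  /* still referenced elsewhere */

        kfree(ch);                              /* ch_release() has already run */
        return 0;
}
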
1496 struct rio_channel *ch, *_c; in riocm_cdev_release() local
1505 idr_for_each_entry(&ch_idr, ch, i) { in riocm_cdev_release()
1506 if (ch && ch->filp == filp) { in riocm_cdev_release()
1508 ch->id, current->comm, in riocm_cdev_release()
1510 idr_remove(&ch_idr, ch->id); in riocm_cdev_release()
1511 list_add(&ch->ch_node, &list); in riocm_cdev_release()
1517 list_for_each_entry_safe(ch, _c, &list, ch_node) { in riocm_cdev_release()
1518 list_del(&ch->ch_node); in riocm_cdev_release()
1519 riocm_ch_close(ch); in riocm_cdev_release()
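
The two-phase teardown used in riocm_cdev_release() reappears in riocm_remove_dev(), riocm_remove_mport() and rio_cm_shutdown(): matching channels are unlinked from ch_idr and collected on a private list while the IDR lock is held, and only after the lock is dropped are they closed, since closing can sleep. A sketch of the pattern with an arbitrary match predicate:

/* Close every registered channel for which @match returns true. */
static void ch_close_matching(bool (*match)(struct cm_ch *ch, void *arg),
                              void *arg)
{
        struct cm_ch *ch, *tmp;
        LIST_HEAD(list);
        int i;

        /* phase 1: unlink matches while holding the IDR lock (no sleeping) */
        spin_lock_bh(&idr_lock);
        idr_for_each_entry(&ch_idr, ch, i) {
                if (match(ch, arg)) {
                        idr_remove(&ch_idr, ch->id);
                        list_add(&ch->ch_node, &list);
                }
        }
        spin_unlock_bh(&idr_lock);

        /* phase 2: close them outside the lock; ch_close() may sleep */
        list_for_each_entry_safe(ch, tmp, &list, ch_node) {
                list_del(&ch->ch_node);
                ch_close(ch);
        }
}
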
1660 struct rio_channel *ch; in cm_chan_create() local
1667 ch = riocm_ch_create(&ch_num); in cm_chan_create()
1668 if (IS_ERR(ch)) in cm_chan_create()
1669 return PTR_ERR(ch); in cm_chan_create()
1671 ch->filp = filp; in cm_chan_create()
1686 struct rio_channel *ch; in cm_chan_close() local
1695 ch = idr_find(&ch_idr, ch_num); in cm_chan_close()
1696 if (!ch) { in cm_chan_close()
1700 if (ch->filp != filp) { in cm_chan_close()
1704 idr_remove(&ch_idr, ch->id); in cm_chan_close()
1707 return riocm_ch_close(ch); in cm_chan_close()
1750 struct rio_channel *ch; in cm_chan_accept() local
1761 	ch = riocm_ch_accept(param.ch_num, &param.ch_num, accept_to); in cm_chan_accept()
1762 if (IS_ERR(ch)) in cm_chan_accept()
1763 return PTR_ERR(ch); in cm_chan_accept()
1764 ch->filp = filp; in cm_chan_accept()
1767 ch->id, current->comm, task_pid_nr(current)); in cm_chan_accept()
1861 struct rio_channel *ch; in cm_chan_msg_rcv() local
1872 ch = riocm_get_channel(msg.ch_num); in cm_chan_msg_rcv()
1873 if (!ch) in cm_chan_msg_rcv()
1878 ret = riocm_ch_receive(ch, &buf, rxto); in cm_chan_msg_rcv()
1887 riocm_ch_free_rxbuf(ch, buf); in cm_chan_msg_rcv()
1889 riocm_put_channel(ch); in cm_chan_msg_rcv()
1993 struct rio_channel *ch, *_c; in riocm_remove_dev() local
2041 idr_for_each_entry(&ch_idr, ch, i) { in riocm_remove_dev()
2042 if (ch && ch->rdev == rdev) { in riocm_remove_dev()
2044 riocm_exch(ch, RIO_CM_DISCONNECT); in riocm_remove_dev()
2045 idr_remove(&ch_idr, ch->id); in riocm_remove_dev()
2046 list_add(&ch->ch_node, &list); in riocm_remove_dev()
2052 list_for_each_entry_safe(ch, _c, &list, ch_node) { in riocm_remove_dev()
2053 list_del(&ch->ch_node); in riocm_remove_dev()
2054 riocm_ch_close(ch); in riocm_remove_dev()
2181 struct rio_channel *ch, *_c; in riocm_remove_mport() local
2206 idr_for_each_entry(&ch_idr, ch, i) { in riocm_remove_mport()
2207 if (ch->cmdev == cm) { in riocm_remove_mport()
2209 mport->name, ch->id); in riocm_remove_mport()
2210 idr_remove(&ch_idr, ch->id); in riocm_remove_mport()
2211 list_add(&ch->ch_node, &list); in riocm_remove_mport()
2217 list_for_each_entry_safe(ch, _c, &list, ch_node) { in riocm_remove_mport()
2218 list_del(&ch->ch_node); in riocm_remove_mport()
2219 riocm_ch_close(ch); in riocm_remove_mport()
2243 struct rio_channel *ch; in rio_cm_shutdown() local
2257 idr_for_each_entry(&ch_idr, ch, i) { in rio_cm_shutdown()
2258 if (ch->state == RIO_CM_CONNECTED) { in rio_cm_shutdown()
2259 riocm_debug(EXIT, "close ch %d", ch->id); in rio_cm_shutdown()
2260 idr_remove(&ch_idr, ch->id); in rio_cm_shutdown()
2261 list_add(&ch->ch_node, &list); in rio_cm_shutdown()
2266 list_for_each_entry(ch, &list, ch_node) in rio_cm_shutdown()
2267 riocm_send_close(ch); in rio_cm_shutdown()