Lines matching refs:cm — identifier cross-reference over the RapidIO channelized messaging driver (drivers/rapidio/rio_cm.c)

203 	struct cm_dev *cm;  member
226 static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
300 static void *riocm_rx_get_msg(struct cm_dev *cm) in riocm_rx_get_msg() argument
305 msg = rio_get_inb_message(cm->mport, cmbox); in riocm_rx_get_msg()
308 if (cm->rx_buf[i] == msg) { in riocm_rx_get_msg()
309 cm->rx_buf[i] = NULL; in riocm_rx_get_msg()
310 cm->rx_slots++; in riocm_rx_get_msg()
329 static void riocm_rx_fill(struct cm_dev *cm, int nent) in riocm_rx_fill() argument
333 if (cm->rx_slots == 0) in riocm_rx_fill()
336 for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) { in riocm_rx_fill()
337 if (cm->rx_buf[i] == NULL) { in riocm_rx_fill()
338 cm->rx_buf[i] = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL); in riocm_rx_fill()
339 if (cm->rx_buf[i] == NULL) in riocm_rx_fill()
341 rio_add_inb_buffer(cm->mport, cmbox, cm->rx_buf[i]); in riocm_rx_fill()
342 cm->rx_slots--; in riocm_rx_fill()
354 static void riocm_rx_free(struct cm_dev *cm) in riocm_rx_free() argument
359 if (cm->rx_buf[i] != NULL) { in riocm_rx_free()
360 kfree(cm->rx_buf[i]); in riocm_rx_free()
361 cm->rx_buf[i] = NULL; in riocm_rx_free()
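
The three groups above (riocm_rx_get_msg(), riocm_rx_fill(), riocm_rx_free()) implement the inbound buffer ring: rx_buf[] tracks the buffers currently posted to the inbound mailbox and rx_slots counts free entries. Below is a reduced sketch of that pattern, reconstructed from the excerpts; the elided lines (306-307, 311-313, 340, 343-345) are filled in by inference, and the cm_dev fields, cmbox, and ring constants are taken as they appear above, so treat this as an illustration rather than the driver source.

#include <linux/rio_drv.h>
#include <linux/slab.h>

/* Fetch one inbound message and release the ring slot that held it. */
static void *rx_get_msg(struct cm_dev *cm)
{
        void *msg;
        int i;

        msg = rio_get_inb_message(cm->mport, cmbox);
        if (msg == NULL)
                return NULL;

        for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
                if (cm->rx_buf[i] == msg) {
                        cm->rx_buf[i] = NULL;
                        cm->rx_slots++;
                        break;
                }
        }
        return msg;
}

/* Re-post up to nent fresh buffers into empty ring slots. */
static void rx_fill(struct cm_dev *cm, int nent)
{
        int i;

        if (cm->rx_slots == 0)
                return;

        for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) {
                if (cm->rx_buf[i] == NULL) {
                        cm->rx_buf[i] = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL);
                        if (cm->rx_buf[i] == NULL)
                                break;
                        rio_add_inb_buffer(cm->mport, cmbox, cm->rx_buf[i]);
                        cm->rx_slots--;
                        nent--;
                }
        }
}

riocm_rx_free() then simply walks the ring and kfree()s any still-posted buffer, as lines 359-361 show; it is only safe to call once inbound mailbox traffic has stopped.
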
376 static int riocm_req_handler(struct cm_dev *cm, void *req_data) in riocm_req_handler() argument
404 req->cmdev = cm; in riocm_req_handler()
486 static void rio_cm_handler(struct cm_dev *cm, void *data) in rio_cm_handler() argument
490 if (!rio_mport_is_running(cm->mport)) in rio_cm_handler()
500 riocm_req_handler(cm, data); in rio_cm_handler()
526 static int rio_rx_data_handler(struct cm_dev *cm, void *buf) in rio_rx_data_handler() argument
582 struct cm_dev *cm = container_of(work, struct cm_dev, rx_work); in rio_ibmsg_handler() local
586 if (!rio_mport_is_running(cm->mport)) in rio_ibmsg_handler()
590 mutex_lock(&cm->rx_lock); in rio_ibmsg_handler()
591 data = riocm_rx_get_msg(cm); in rio_ibmsg_handler()
593 riocm_rx_fill(cm, 1); in rio_ibmsg_handler()
594 mutex_unlock(&cm->rx_lock); in rio_ibmsg_handler()
611 rio_rx_data_handler(cm, data); in rio_ibmsg_handler()
613 rio_cm_handler(cm, data); in rio_ibmsg_handler()
620 struct cm_dev *cm = dev_id; in riocm_inb_msg_event() local
622 if (rio_mport_is_running(cm->mport) && !work_pending(&cm->rx_work)) in riocm_inb_msg_event()
623 queue_work(cm->rx_wq, &cm->rx_work); in riocm_inb_msg_event()
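
riocm_inb_msg_event() runs in the mailbox callback context, so it does no processing itself: it queues rx_work on the driver's dedicated workqueue unless a pass is already pending. rio_ibmsg_handler() then drains one message at a time under rx_lock, replacing each consumed buffer via riocm_rx_fill(cm, 1) before dispatching it to rio_rx_data_handler() or rio_cm_handler(). A sketch of that deferral, assuming the callback signature that rio_request_inb_mbox() expects; the dispatch test is elided in the listing and only summarized in a comment here.

/* Mailbox callback: defer all processing to the rx workqueue. */
static void inb_msg_event(struct rio_mport *mport, void *dev_id,
                          int mbox, int slot)
{
        struct cm_dev *cm = dev_id;

        if (rio_mport_is_running(cm->mport) && !work_pending(&cm->rx_work))
                queue_work(cm->rx_wq, &cm->rx_work);
}

/* Work item: drain the inbound mailbox in process context. */
static void ibmsg_handler(struct work_struct *work)
{
        struct cm_dev *cm = container_of(work, struct cm_dev, rx_work);
        void *data;

        if (!rio_mport_is_running(cm->mport))
                return;

        while (1) {
                mutex_lock(&cm->rx_lock);
                data = riocm_rx_get_msg(cm);
                if (data)
                        riocm_rx_fill(cm, 1);   /* one-for-one refill */
                mutex_unlock(&cm->rx_lock);

                if (data == NULL)
                        break;

                /* Dispatch on the message header: payload traffic goes to
                 * rio_rx_data_handler(), control traffic to
                 * rio_cm_handler(). */
        }
}

The work_pending() test keeps the callback from piling up redundant work items; since the handler drains the mailbox in a loop, one pending pass is enough.
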
635 static void rio_txcq_handler(struct cm_dev *cm, int slot) in rio_txcq_handler() argument
644 cm->mport->id, slot, cm->tx_cnt); in rio_txcq_handler()
646 spin_lock(&cm->tx_lock); in rio_txcq_handler()
647 ack_slot = cm->tx_ack_slot; in rio_txcq_handler()
652 while (cm->tx_cnt && ((ack_slot != slot) || in rio_txcq_handler()
653 (cm->tx_cnt == RIOCM_TX_RING_SIZE))) { in rio_txcq_handler()
655 cm->tx_buf[ack_slot] = NULL; in rio_txcq_handler()
658 cm->tx_cnt--; in rio_txcq_handler()
661 if (cm->tx_cnt < 0 || cm->tx_cnt > RIOCM_TX_RING_SIZE) in rio_txcq_handler()
662 riocm_error("tx_cnt %d out of sync", cm->tx_cnt); in rio_txcq_handler()
664 WARN_ON((cm->tx_cnt < 0) || (cm->tx_cnt > RIOCM_TX_RING_SIZE)); in rio_txcq_handler()
666 cm->tx_ack_slot = ack_slot; in rio_txcq_handler()
671 if (!list_empty(&cm->tx_reqs) && (cm->tx_cnt < RIOCM_TX_RING_SIZE)) { in rio_txcq_handler()
675 list_for_each_entry_safe(req, _req, &cm->tx_reqs, node) { in rio_txcq_handler()
677 cm->tx_buf[cm->tx_slot] = req->buffer; in rio_txcq_handler()
678 rc = rio_add_outb_message(cm->mport, req->rdev, cmbox, in rio_txcq_handler()
683 ++cm->tx_cnt; in rio_txcq_handler()
684 ++cm->tx_slot; in rio_txcq_handler()
685 cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1); in rio_txcq_handler()
686 if (cm->tx_cnt == RIOCM_TX_RING_SIZE) in rio_txcq_handler()
691 spin_unlock(&cm->tx_lock); in rio_txcq_handler()
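
rio_txcq_handler() reclaims transmit ring entries: it walks from tx_ack_slot toward the slot just completed, clearing tx_buf[] entries and decrementing tx_cnt, then (lines 671-686) refills freed slots from the tx_reqs backlog using the same submit sequence as riocm_post_send() (compare lines 677-685 with 752-760). The extra (cm->tx_cnt == RIOCM_TX_RING_SIZE) term in the loop condition matters when the ring was completely full: ack_slot already equals slot, and without that term the loop would reclaim nothing. A reduced sketch of the ack walk; the increment-and-mask step is elided in the listing but is inferred from the same wrap arithmetic visible at lines 685 and 760:

        spin_lock(&cm->tx_lock);
        ack_slot = cm->tx_ack_slot;

        while (cm->tx_cnt && ((ack_slot != slot) ||
               (cm->tx_cnt == RIOCM_TX_RING_SIZE))) {
                cm->tx_buf[ack_slot] = NULL;            /* entry completed */
                ++ack_slot;
                ack_slot &= (RIOCM_TX_RING_SIZE - 1);   /* power-of-two ring */
                cm->tx_cnt--;
        }

        cm->tx_ack_slot = ack_slot;
        spin_unlock(&cm->tx_lock);

The sanity checks at lines 661-664 (riocm_error() plus WARN_ON()) guard exactly this invariant: tx_cnt must stay within [0, RIOCM_TX_RING_SIZE].
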
697 struct cm_dev *cm = dev_id; in riocm_outb_msg_event() local
699 if (cm && rio_mport_is_running(cm->mport)) in riocm_outb_msg_event()
700 rio_txcq_handler(cm, slot); in riocm_outb_msg_event()
703 static int riocm_queue_req(struct cm_dev *cm, struct rio_dev *rdev, in riocm_queue_req() argument
717 spin_lock_irqsave(&cm->tx_lock, flags); in riocm_queue_req()
718 list_add_tail(&treq->node, &cm->tx_reqs); in riocm_queue_req()
719 spin_unlock_irqrestore(&cm->tx_lock, flags); in riocm_queue_req()
733 static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev, in riocm_post_send() argument
739 spin_lock_irqsave(&cm->tx_lock, flags); in riocm_post_send()
741 if (cm->mport == NULL) { in riocm_post_send()
746 if (cm->tx_cnt == RIOCM_TX_RING_SIZE) { in riocm_post_send()
752 cm->tx_buf[cm->tx_slot] = buffer; in riocm_post_send()
753 rc = rio_add_outb_message(cm->mport, rdev, cmbox, buffer, len); in riocm_post_send()
756 buffer, rdev->destid, cm->tx_slot, cm->tx_cnt); in riocm_post_send()
758 ++cm->tx_cnt; in riocm_post_send()
759 ++cm->tx_slot; in riocm_post_send()
760 cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1); in riocm_post_send()
763 spin_unlock_irqrestore(&cm->tx_lock, flags); in riocm_post_send()
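
riocm_queue_req() and riocm_post_send() are the two transmit paths: post_send places a buffer straight into the ring under tx_lock, failing fast when the ring is full, while queue_req parks the request on tx_reqs for rio_txcq_handler() to submit once completions free a slot. A sketch of the direct path; the error codes on the two early-out branches are assumptions, since the corresponding lines are elided in the listing:

static int post_send(struct cm_dev *cm, struct rio_dev *rdev,
                     void *buffer, size_t len)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&cm->tx_lock, flags);

        if (cm->mport == NULL) {                /* device going away */
                rc = -ENODEV;                   /* assumed error code */
                goto err_out;
        }

        if (cm->tx_cnt == RIOCM_TX_RING_SIZE) { /* ring full */
                rc = -EBUSY;                    /* assumed error code */
                goto err_out;
        }

        cm->tx_buf[cm->tx_slot] = buffer;
        rc = rio_add_outb_message(cm->mport, rdev, cmbox, buffer, len);

        ++cm->tx_cnt;
        ++cm->tx_slot;
        cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);

err_out:
        spin_unlock_irqrestore(&cm->tx_lock, flags);
        return rc;
}
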
942 static int riocm_ch_connect(u16 loc_ch, struct cm_dev *cm, in riocm_ch_connect() argument
959 ch->cmdev = cm; in riocm_ch_connect()
962 ch->loc_destid = cm->mport->host_deviceid; in riocm_ch_connect()
989 ret = riocm_post_send(cm, peer->rdev, hdr, sizeof(*hdr)); in riocm_ch_connect()
994 ret = riocm_queue_req(cm, peer->rdev, hdr, sizeof(*hdr)); in riocm_ch_connect()
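
riocm_ch_connect() binds the channel to its cm_dev and local destination ID, then attempts the direct send first; lines 989 and 994 only make sense together if a full ring falls back to the queued path, so a plausible reconstruction of the elided branch is:

        ret = riocm_post_send(cm, peer->rdev, hdr, sizeof(*hdr));
        if (ret == -EBUSY)      /* ring full: let the TX completion
                                 * handler submit it later */
                ret = riocm_queue_req(cm, peer->rdev, hdr, sizeof(*hdr));
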
1231 struct cm_dev *cm; in riocm_ch_bind() local
1238 list_for_each_entry(cm, &cm_dev_list, list) { in riocm_ch_bind()
1239 if ((cm->mport->id == mport_id) && in riocm_ch_bind()
1240 rio_mport_is_running(cm->mport)) { in riocm_ch_bind()
1262 ch->cmdev = cm; in riocm_ch_bind()
1263 ch->loc_destid = cm->mport->host_deviceid; in riocm_ch_bind()
1534 struct cm_dev *cm; in cm_ep_get_list_size() local
1543 list_for_each_entry(cm, &cm_dev_list, list) { in cm_ep_get_list_size()
1544 if (cm->mport->id == mport_id) { in cm_ep_get_list_size()
1545 count = cm->npeers; in cm_ep_get_list_size()
1562 struct cm_dev *cm; in cm_ep_get_list() local
1579 list_for_each_entry(cm, &cm_dev_list, list) in cm_ep_get_list()
1580 if (cm->mport->id == (u8)info[1]) in cm_ep_get_list()
1587 nent = min(info[0], cm->npeers); in cm_ep_get_list()
1596 list_for_each_entry(peer, &cm->peers, node) { in cm_ep_get_list()
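
cm_ep_get_list() clamps the caller's requested entry count to the number of known peers (nent = min(info[0], cm->npeers), line 1587) and then emits one destination ID per peer. The loop body is elided in the listing; a sketch of what the traversal plausibly does, where entry_ptr is assumed to point into the output buffer and the destid field use is inferred from struct rio_dev:

        list_for_each_entry(peer, &cm->peers, node) {
                *entry_ptr = (u32)peer->rdev->destid;   /* assumed field use */
                entry_ptr++;
                if (++i == nent)
                        break;
        }
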
1621 struct cm_dev *cm; in cm_mport_get_list() local
1636 list_for_each_entry(cm, &cm_dev_list, list) { in cm_mport_get_list()
1638 *entry_ptr = (cm->mport->id << 16) | in cm_mport_get_list()
1639 cm->mport->host_deviceid; in cm_mport_get_list()
1781 struct cm_dev *cm; in cm_chan_connect() local
1793 list_for_each_entry(cm, &cm_dev_list, list) { in cm_chan_connect()
1794 if (cm->mport->id == chan.mport_id) { in cm_chan_connect()
1803 if (chan.remote_destid >= RIO_ANY_DESTID(cm->mport->sys_size)) { in cm_chan_connect()
1811 list_for_each_entry(peer, &cm->peers, node) { in cm_chan_connect()
1823 return riocm_ch_connect(chan.id, cm, peer, chan.remote_channel); in cm_chan_connect()
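
cm_chan_connect() performs two linear lookups before handing off to riocm_ch_connect(): first the cm_dev whose mport matches chan.mport_id, then the peer matching chan.remote_destid (after rejecting destination IDs at or above RIO_ANY_DESTID() for the mport's system size, line 1803). Both searches follow the lookup pattern used throughout the file; a sketch, with the found flag and error code assumed:

        list_for_each_entry(cm, &cm_dev_list, list) {
                if (cm->mport->id == chan.mport_id) {
                        found = true;           /* assumed flag */
                        break;
                }
        }
        if (!found)
                return -ENODEV;                 /* assumed error code */
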
1948 struct cm_dev *cm; in riocm_add_dev() local
1962 list_for_each_entry(cm, &cm_dev_list, list) { in riocm_add_dev()
1963 if (cm->mport == rdev->net->hport) in riocm_add_dev()
1973 list_add_tail(&peer->node, &cm->peers); in riocm_add_dev()
1974 cm->npeers++; in riocm_add_dev()
1991 struct cm_dev *cm; in riocm_remove_dev() local
2006 list_for_each_entry(cm, &cm_dev_list, list) { in riocm_remove_dev()
2007 if (cm->mport == rdev->net->hport) { in riocm_remove_dev()
2020 list_for_each_entry(peer, &cm->peers, node) { in riocm_remove_dev()
2025 cm->npeers--; in riocm_remove_dev()
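
riocm_add_dev() and riocm_remove_dev() keep each cm_dev's peer list in sync with device hotplug: a new rio_dev is attached to the cm_dev whose mport matches rdev->net->hport, and removal reverses that while decrementing npeers. A sketch of the add path; the cm_peer layout (node plus rdev, consistent with lines 989 and 1973) and the not-found behavior are inferred:

static int add_dev_peer(struct rio_dev *rdev)
{
        struct cm_dev *cm;
        struct cm_peer *peer;
        bool found = false;

        /* Match the device's host port against the registered mports. */
        list_for_each_entry(cm, &cm_dev_list, list) {
                if (cm->mport == rdev->net->hport) {
                        found = true;
                        break;
                }
        }
        if (!found)
                return 0;       /* not one of ours; assumed behavior */

        peer = kmalloc(sizeof(*peer), GFP_KERNEL);
        if (!peer)
                return -ENOMEM;

        peer->rdev = rdev;
        list_add_tail(&peer->node, &cm->peers);
        cm->npeers++;
        return 0;
}
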
2100 struct cm_dev *cm; in riocm_add_mport() local
2105 cm = kzalloc(sizeof(*cm), GFP_KERNEL); in riocm_add_mport()
2106 if (!cm) in riocm_add_mport()
2109 cm->mport = mport; in riocm_add_mport()
2111 rc = rio_request_outb_mbox(mport, cm, cmbox, in riocm_add_mport()
2116 kfree(cm); in riocm_add_mport()
2120 rc = rio_request_inb_mbox(mport, cm, cmbox, in riocm_add_mport()
2126 kfree(cm); in riocm_add_mport()
2130 cm->rx_wq = create_workqueue(DRV_NAME "/rxq"); in riocm_add_mport()
2131 if (!cm->rx_wq) { in riocm_add_mport()
2134 kfree(cm); in riocm_add_mport()
2143 cm->rx_buf[i] = NULL; in riocm_add_mport()
2145 cm->rx_slots = RIOCM_RX_RING_SIZE; in riocm_add_mport()
2146 mutex_init(&cm->rx_lock); in riocm_add_mport()
2147 riocm_rx_fill(cm, RIOCM_RX_RING_SIZE); in riocm_add_mport()
2148 INIT_WORK(&cm->rx_work, rio_ibmsg_handler); in riocm_add_mport()
2150 cm->tx_slot = 0; in riocm_add_mport()
2151 cm->tx_cnt = 0; in riocm_add_mport()
2152 cm->tx_ack_slot = 0; in riocm_add_mport()
2153 spin_lock_init(&cm->tx_lock); in riocm_add_mport()
2155 INIT_LIST_HEAD(&cm->peers); in riocm_add_mport()
2156 cm->npeers = 0; in riocm_add_mport()
2157 INIT_LIST_HEAD(&cm->tx_reqs); in riocm_add_mport()
2160 list_add_tail(&cm->list, &cm_dev_list); in riocm_add_mport()
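
riocm_add_mport() shows the bring-up order: outbound mailbox, inbound mailbox, RX workqueue, then the rings, locks and lists, and finally publication on cm_dev_list. The listing elides the unwind steps next to each kfree(cm); a sketch of those error paths, with the mailbox release calls inferred as mirrors of the request calls and the -ENOMEM code assumed:

        rc = rio_request_outb_mbox(mport, cm, cmbox, RIOCM_TX_RING_SIZE,
                                   riocm_outb_msg_event);
        if (rc) {
                kfree(cm);
                return rc;
        }

        rc = rio_request_inb_mbox(mport, cm, cmbox, RIOCM_RX_RING_SIZE,
                                   riocm_inb_msg_event);
        if (rc) {
                rio_release_outb_mbox(mport, cmbox);    /* inferred unwind */
                kfree(cm);
                return rc;
        }

        cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
        if (!cm->rx_wq) {
                rio_release_inb_mbox(mport, cmbox);     /* inferred unwind */
                rio_release_outb_mbox(mport, cmbox);
                kfree(cm);
                return -ENOMEM;                         /* assumed code */
        }

Note the ordering visible in the listing: the RX ring is primed (riocm_rx_fill(), line 2147) before the work item is initialized (INIT_WORK(), line 2148) and before the cm_dev is published on cm_dev_list (line 2160).
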
2179 struct cm_dev *cm; in riocm_remove_mport() local
2190 list_for_each_entry(cm, &cm_dev_list, list) { in riocm_remove_mport()
2191 if (cm->mport == mport) { in riocm_remove_mport()
2192 list_del(&cm->list); in riocm_remove_mport()
2201 flush_workqueue(cm->rx_wq); in riocm_remove_mport()
2202 destroy_workqueue(cm->rx_wq); in riocm_remove_mport()
2207 if (ch->cmdev == cm) { in riocm_remove_mport()
2227 if (!list_empty(&cm->peers)) in riocm_remove_mport()
2229 list_for_each_entry_safe(peer, temp, &cm->peers, node) { in riocm_remove_mport()
2235 riocm_rx_free(cm); in riocm_remove_mport()
2236 kfree(cm); in riocm_remove_mport()
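
riocm_remove_mport() tears down in the reverse order: the cm_dev is unlinked from cm_dev_list first so no new lookups can find it, the RX workqueue is flushed and destroyed, channels bound to this cm_dev (ch->cmdev == cm, line 2207) are closed, the peer list is emptied, and only then are the RX buffers and the cm_dev itself freed. A sketch of the tail of that sequence; the list_del()/kfree() pair inside the peer loop is inferred from the use of list_for_each_entry_safe():

        list_del(&cm->list);                    /* unpublish first */

        flush_workqueue(cm->rx_wq);             /* let in-flight RX finish */
        destroy_workqueue(cm->rx_wq);

        /* ... close every channel whose ch->cmdev == cm ... */

        list_for_each_entry_safe(peer, temp, &cm->peers, node) {
                list_del(&peer->node);          /* inferred */
                kfree(peer);                    /* inferred */
        }

        riocm_rx_free(cm);      /* safe: mailbox traffic has stopped */
        kfree(cm);
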