Searched refs:qdev (Results 1 – 25 of 33) sorted by relevance

/linux-6.6.21/drivers/net/ethernet/qlogic/
qla3xxx.c
103 static int ql_sem_spinlock(struct ql3_adapter *qdev, in ql_sem_spinlock() argument
107 qdev->mem_map_registers; in ql_sem_spinlock()
122 static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) in ql_sem_unlock() argument
125 qdev->mem_map_registers; in ql_sem_unlock()
130 static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) in ql_sem_lock() argument
133 qdev->mem_map_registers; in ql_sem_lock()
144 static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) in ql_wait_for_drvr_lock() argument
149 if (ql_sem_lock(qdev, in ql_wait_for_drvr_lock()
151 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) in ql_wait_for_drvr_lock()
153 netdev_printk(KERN_DEBUG, qdev->ndev, in ql_wait_for_drvr_lock()
[all …]
/linux-6.6.21/drivers/gpu/drm/qxl/
qxl_kms.c
36 static bool qxl_check_device(struct qxl_device *qdev) in qxl_check_device() argument
38 struct qxl_rom *rom = qdev->rom; in qxl_check_device()
53 qdev->vram_size = rom->surface0_area_size; in qxl_check_device()
58 static void setup_hw_slot(struct qxl_device *qdev, struct qxl_memslot *slot) in setup_hw_slot() argument
60 qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr; in setup_hw_slot()
61 qdev->ram_header->mem_slot.mem_end = slot->start_phys_addr + slot->size; in setup_hw_slot()
62 qxl_io_memslot_add(qdev, qdev->rom->slots_start + slot->index); in setup_hw_slot()
65 static void setup_slot(struct qxl_device *qdev, in setup_slot() argument
79 setup_hw_slot(qdev, slot); in setup_slot()
81 slot->generation = qdev->rom->slot_generation; in setup_slot()
[all …]
qxl_cmd.c
35 static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
178 qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, in qxl_push_command_ring_release() argument
184 cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset); in qxl_push_command_ring_release()
186 return qxl_ring_push(qdev->command_ring, &cmd, interruptible); in qxl_push_command_ring_release()
190 qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, in qxl_push_cursor_ring_release() argument
196 cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset); in qxl_push_cursor_ring_release()
198 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); in qxl_push_cursor_ring_release()
201 bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush) in qxl_queue_garbage_collect() argument
203 if (!qxl_check_idle(qdev->release_ring)) { in qxl_queue_garbage_collect()
204 schedule_work(&qdev->gc_work); in qxl_queue_garbage_collect()
[all …]
qxl_display.c
49 static int qxl_alloc_client_monitors_config(struct qxl_device *qdev, in qxl_alloc_client_monitors_config() argument
52 if (qdev->client_monitors_config && in qxl_alloc_client_monitors_config()
53 count > qdev->client_monitors_config->count) { in qxl_alloc_client_monitors_config()
54 kfree(qdev->client_monitors_config); in qxl_alloc_client_monitors_config()
55 qdev->client_monitors_config = NULL; in qxl_alloc_client_monitors_config()
57 if (!qdev->client_monitors_config) { in qxl_alloc_client_monitors_config()
58 qdev->client_monitors_config = kzalloc( in qxl_alloc_client_monitors_config()
59 struct_size(qdev->client_monitors_config, in qxl_alloc_client_monitors_config()
61 if (!qdev->client_monitors_config) in qxl_alloc_client_monitors_config()
64 qdev->client_monitors_config->count = count; in qxl_alloc_client_monitors_config()
[all …]
qxl_irq.c
35 struct qxl_device *qdev = to_qxl(dev); in qxl_irq_handler() local
38 pending = xchg(&qdev->ram_header->int_pending, 0); in qxl_irq_handler()
43 atomic_inc(&qdev->irq_received); in qxl_irq_handler()
46 atomic_inc(&qdev->irq_received_display); in qxl_irq_handler()
47 wake_up_all(&qdev->display_event); in qxl_irq_handler()
48 qxl_queue_garbage_collect(qdev, false); in qxl_irq_handler()
51 atomic_inc(&qdev->irq_received_cursor); in qxl_irq_handler()
52 wake_up_all(&qdev->cursor_event); in qxl_irq_handler()
55 atomic_inc(&qdev->irq_received_io_cmd); in qxl_irq_handler()
56 wake_up_all(&qdev->io_cmd_event); in qxl_irq_handler()
[all …]
qxl_drv.h
261 int qxl_device_init(struct qxl_device *qdev, struct pci_dev *pdev);
262 void qxl_device_fini(struct qxl_device *qdev);
264 int qxl_modeset_init(struct qxl_device *qdev);
265 void qxl_modeset_fini(struct qxl_device *qdev);
267 int qxl_bo_init(struct qxl_device *qdev);
268 void qxl_bo_fini(struct qxl_device *qdev);
270 void qxl_reinit_memslots(struct qxl_device *qdev);
271 int qxl_surf_evict(struct qxl_device *qdev);
272 int qxl_vram_evict(struct qxl_device *qdev);
283 qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo, in qxl_bo_physical_address() argument
[all …]
qxl_release.c
60 struct qxl_device *qdev; in qxl_fence_wait() local
63 qdev = container_of(fence->lock, struct qxl_device, release_lock); in qxl_fence_wait()
65 if (!wait_event_timeout(qdev->release_event, in qxl_fence_wait()
67 (qxl_io_notify_oom(qdev), 0)), in qxl_fence_wait()
84 qxl_release_alloc(struct qxl_device *qdev, int type, in qxl_release_alloc() argument
103 spin_lock(&qdev->release_idr_lock); in qxl_release_alloc()
104 handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT); in qxl_release_alloc()
105 release->base.seqno = ++qdev->release_seqno; in qxl_release_alloc()
106 spin_unlock(&qdev->release_idr_lock); in qxl_release_alloc()
137 qxl_release_free(struct qxl_device *qdev, in qxl_release_free() argument
[all …]
qxl_ttm.c
43 struct qxl_device *qdev; in qxl_get_qdev() local
46 qdev = container_of(mman, struct qxl_device, mman); in qxl_get_qdev()
47 return qdev; in qxl_get_qdev()
76 struct qxl_device *qdev = qxl_get_qdev(bdev); in qxl_ttm_io_mem_reserve() local
84 mem->bus.offset = (mem->start << PAGE_SHIFT) + qdev->vram_base; in qxl_ttm_io_mem_reserve()
90 qdev->surfaceram_base; in qxl_ttm_io_mem_reserve()
127 struct qxl_device *qdev; in qxl_bo_move_notify() local
132 qdev = to_qxl(qbo->tbo.base.dev); in qxl_bo_move_notify()
135 qxl_surface_evict(qdev, qbo, new_mem ? true : false); in qxl_bo_move_notify()
185 static int qxl_ttm_init_mem_type(struct qxl_device *qdev, in qxl_ttm_init_mem_type() argument
[all …]
qxl_debugfs.c
42 struct qxl_device *qdev = to_qxl(node->minor->dev); in qxl_debugfs_irq_received() local
44 seq_printf(m, "%d\n", atomic_read(&qdev->irq_received)); in qxl_debugfs_irq_received()
45 seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display)); in qxl_debugfs_irq_received()
46 seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor)); in qxl_debugfs_irq_received()
47 seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd)); in qxl_debugfs_irq_received()
48 seq_printf(m, "%d\n", qdev->irq_received_error); in qxl_debugfs_irq_received()
56 struct qxl_device *qdev = to_qxl(node->minor->dev); in qxl_debugfs_buffers_info() local
59 list_for_each_entry(bo, &qdev->gem.objects, list) { in qxl_debugfs_buffers_info()
99 void qxl_debugfs_add_files(struct qxl_device *qdev, in qxl_debugfs_add_files() argument
105 for (i = 0; i < qdev->debugfs_count; i++) { in qxl_debugfs_add_files()
[all …]
qxl_drv.c
79 struct qxl_device *qdev; in qxl_pci_probe() local
88 qdev = devm_drm_dev_alloc(&pdev->dev, &qxl_driver, in qxl_pci_probe()
90 if (IS_ERR(qdev)) { in qxl_pci_probe()
111 ret = qxl_device_init(qdev, pdev); in qxl_pci_probe()
115 ret = qxl_modeset_init(qdev); in qxl_pci_probe()
119 drm_kms_helper_poll_init(&qdev->ddev); in qxl_pci_probe()
122 ret = drm_dev_register(&qdev->ddev, ent->driver_data); in qxl_pci_probe()
126 drm_fbdev_generic_setup(&qdev->ddev, 32); in qxl_pci_probe()
130 qxl_modeset_fini(qdev); in qxl_pci_probe()
132 qxl_device_fini(qdev); in qxl_pci_probe()
[all …]
qxl_gem.c
34 struct qxl_device *qdev; in qxl_gem_object_free() local
37 qdev = to_qxl(gobj->dev); in qxl_gem_object_free()
39 qxl_surface_evict(qdev, qobj, false); in qxl_gem_object_free()
45 int qxl_gem_object_create(struct qxl_device *qdev, int size, in qxl_gem_object_create() argument
58 r = qxl_bo_create(qdev, size, kernel, false, initial_domain, 0, surf, &qbo); in qxl_gem_object_create()
68 mutex_lock(&qdev->gem.mutex); in qxl_gem_object_create()
69 list_add_tail(&qbo->list, &qdev->gem.objects); in qxl_gem_object_create()
70 mutex_unlock(&qdev->gem.mutex); in qxl_gem_object_create()
81 int qxl_gem_object_create_with_handle(struct qxl_device *qdev, in qxl_gem_object_create_with_handle() argument
94 r = qxl_gem_object_create(qdev, size, 0, in qxl_gem_object_create_with_handle()
[all …]
qxl_draw.c
31 static int alloc_clips(struct qxl_device *qdev, in alloc_clips() argument
38 return qxl_alloc_bo_reserved(qdev, release, size, clips_bo); in alloc_clips()
44 static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, in drawable_set_clipping() argument
65 alloc_drawable(struct qxl_device *qdev, struct qxl_release **release) in alloc_drawable() argument
67 return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable), in alloc_drawable()
72 free_drawable(struct qxl_device *qdev, struct qxl_release *release) in free_drawable() argument
74 qxl_release_free(qdev, release); in free_drawable()
79 make_drawable(struct qxl_device *qdev, int surface, uint8_t type, in make_drawable() argument
86 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); in make_drawable()
114 drawable->mm_time = qdev->rom->mm_clock; in make_drawable()
[all …]
qxl_object.c
38 struct qxl_device *qdev; in qxl_ttm_bo_destroy() local
41 qdev = to_qxl(bo->tbo.base.dev); in qxl_ttm_bo_destroy()
43 qxl_surface_evict(qdev, bo, false); in qxl_ttm_bo_destroy()
45 mutex_lock(&qdev->gem.mutex); in qxl_ttm_bo_destroy()
47 mutex_unlock(&qdev->gem.mutex); in qxl_ttm_bo_destroy()
109 int qxl_bo_create(struct qxl_device *qdev, unsigned long size, in qxl_bo_create() argument
128 r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size); in qxl_bo_create()
144 r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, type, in qxl_bo_create()
149 dev_err(qdev->ddev.dev, in qxl_bo_create()
207 void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, in qxl_bo_kmap_atomic_page() argument
[all …]
qxl_ioctl.c
38 struct qxl_device *qdev = to_qxl(dev); in qxl_alloc_ioctl() local
48 ret = qxl_gem_object_create_with_handle(qdev, file_priv, in qxl_alloc_ioctl()
64 struct qxl_device *qdev = to_qxl(dev); in qxl_map_ioctl() local
67 return drm_gem_ttm_dumb_map_offset(file_priv, &qdev->ddev, qxl_map->handle, in qxl_map_ioctl()
85 apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) in apply_reloc() argument
89 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); in apply_reloc()
90 *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, in apply_reloc()
93 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page); in apply_reloc()
97 apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) in apply_surf_reloc() argument
105 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); in apply_surf_reloc()
[all …]
qxl_image.c
33 qxl_allocate_chunk(struct qxl_device *qdev, in qxl_allocate_chunk() argument
45 ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo); in qxl_allocate_chunk()
56 qxl_image_alloc_objects(struct qxl_device *qdev, in qxl_image_alloc_objects() argument
70 ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo); in qxl_image_alloc_objects()
76 ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height); in qxl_image_alloc_objects()
86 void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage) in qxl_image_free_objects() argument
100 qxl_image_init_helper(struct qxl_device *qdev, in qxl_image_init_helper() argument
127 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0); in qxl_image_init_helper()
132 qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr); in qxl_image_init_helper()
146 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT); in qxl_image_init_helper()
[all …]
/linux-6.6.21/drivers/staging/qlge/
qlge_mpi.c
4 int qlge_unpause_mpi_risc(struct qlge_adapter *qdev) in qlge_unpause_mpi_risc() argument
9 tmp = qlge_read32(qdev, CSR); in qlge_unpause_mpi_risc()
13 qlge_write32(qdev, CSR, CSR_CMD_CLR_PAUSE); in qlge_unpause_mpi_risc()
17 int qlge_pause_mpi_risc(struct qlge_adapter *qdev) in qlge_pause_mpi_risc() argument
23 qlge_write32(qdev, CSR, CSR_CMD_SET_PAUSE); in qlge_pause_mpi_risc()
25 tmp = qlge_read32(qdev, CSR); in qlge_pause_mpi_risc()
33 int qlge_hard_reset_mpi_risc(struct qlge_adapter *qdev) in qlge_hard_reset_mpi_risc() argument
39 qlge_write32(qdev, CSR, CSR_CMD_SET_RST); in qlge_hard_reset_mpi_risc()
41 tmp = qlge_read32(qdev, CSR); in qlge_hard_reset_mpi_risc()
43 qlge_write32(qdev, CSR, CSR_CMD_CLR_RST); in qlge_hard_reset_mpi_risc()
[all …]
qlge_main.c
102 static int qlge_sem_trylock(struct qlge_adapter *qdev, u32 sem_mask) in qlge_sem_trylock() argument
132 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n"); in qlge_sem_trylock()
136 qlge_write32(qdev, SEM, sem_bits | sem_mask); in qlge_sem_trylock()
137 return !(qlge_read32(qdev, SEM) & sem_bits); in qlge_sem_trylock()
140 int qlge_sem_spinlock(struct qlge_adapter *qdev, u32 sem_mask) in qlge_sem_spinlock() argument
145 if (!qlge_sem_trylock(qdev, sem_mask)) in qlge_sem_spinlock()
152 void qlge_sem_unlock(struct qlge_adapter *qdev, u32 sem_mask) in qlge_sem_unlock() argument
154 qlge_write32(qdev, SEM, sem_mask); in qlge_sem_unlock()
155 qlge_read32(qdev, SEM); /* flush */ in qlge_sem_unlock()
163 int qlge_wait_reg_rdy(struct qlge_adapter *qdev, u32 reg, u32 bit, u32 err_bit) in qlge_wait_reg_rdy() argument
[all …]
qlge_ethtool.c
186 static int qlge_update_ring_coalescing(struct qlge_adapter *qdev) in qlge_update_ring_coalescing() argument
192 if (!netif_running(qdev->ndev)) in qlge_update_ring_coalescing()
198 cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count]; in qlge_update_ring_coalescing()
199 if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs || in qlge_update_ring_coalescing()
200 le16_to_cpu(cqicb->pkt_delay) != qdev->tx_max_coalesced_frames) { in qlge_update_ring_coalescing()
201 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { in qlge_update_ring_coalescing()
202 rx_ring = &qdev->rx_ring[i]; in qlge_update_ring_coalescing()
204 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); in qlge_update_ring_coalescing()
206 cpu_to_le16(qdev->tx_max_coalesced_frames); in qlge_update_ring_coalescing()
208 status = qlge_write_cfg(qdev, cqicb, sizeof(*cqicb), in qlge_update_ring_coalescing()
[all …]
qlge_dbg.c
9 static u32 qlge_read_other_func_reg(struct qlge_adapter *qdev, in qlge_read_other_func_reg() argument
18 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT) in qlge_read_other_func_reg()
20 status = qlge_read_mpi_reg(qdev, register_to_read, &reg_val); in qlge_read_other_func_reg()
28 static int qlge_write_other_func_reg(struct qlge_adapter *qdev, in qlge_write_other_func_reg() argument
35 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT) in qlge_write_other_func_reg()
38 return qlge_write_mpi_reg(qdev, register_to_read, reg_val); in qlge_write_other_func_reg()
41 static int qlge_wait_other_func_reg_rdy(struct qlge_adapter *qdev, u32 reg, in qlge_wait_other_func_reg_rdy() argument
48 temp = qlge_read_other_func_reg(qdev, reg); in qlge_wait_other_func_reg_rdy()
60 static int qlge_read_other_func_serdes_reg(struct qlge_adapter *qdev, u32 reg, in qlge_read_other_func_serdes_reg() argument
66 status = qlge_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, in qlge_read_other_func_serdes_reg()
[all …]
/linux-6.6.21/drivers/accel/qaic/
qaic_drv.c
44 static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id);
45 static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id);
59 struct qaic_device *qdev = qddev->qdev; in qaic_open() local
64 rcu_id = srcu_read_lock(&qdev->dev_lock); in qaic_open()
65 if (qdev->in_reset) { in qaic_open()
95 srcu_read_unlock(&qdev->dev_lock, rcu_id); in qaic_open()
104 srcu_read_unlock(&qdev->dev_lock, rcu_id); in qaic_open()
112 struct qaic_device *qdev; in qaic_postclose() local
120 qdev = qddev->qdev; in qaic_postclose()
121 qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); in qaic_postclose()
[all …]
qaic_control.c
236 struct qaic_device *qdev; member
297 static void save_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources, in save_dbc_buf() argument
303 wait_event_interruptible(qdev->dbc[dbc_id].dbc_release, !qdev->dbc[dbc_id].in_use); in save_dbc_buf()
304 qdev->dbc[dbc_id].req_q_base = resources->buf; in save_dbc_buf()
305 qdev->dbc[dbc_id].rsp_q_base = resources->rsp_q_base; in save_dbc_buf()
306 qdev->dbc[dbc_id].dma_addr = resources->dma_addr; in save_dbc_buf()
307 qdev->dbc[dbc_id].total_size = resources->total_size; in save_dbc_buf()
308 qdev->dbc[dbc_id].nelem = resources->nelem; in save_dbc_buf()
309 enable_dbc(qdev, dbc_id, usr); in save_dbc_buf()
310 qdev->dbc[dbc_id].in_use = true; in save_dbc_buf()
[all …]
qaic_data.c
165 static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out, in clone_range_of_sgt_for_slice() argument
245 static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice, in encode_reqs() argument
380 static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo, in qaic_map_one_slice() argument
387 ret = clone_range_of_sgt_for_slice(qdev, &sgt, bo->sgt, slice_ent->size, slice_ent->offset); in qaic_map_one_slice()
411 ret = encode_reqs(qdev, slice, slice_ent); in qaic_map_one_slice()
433 static int create_sgt(struct qaic_device *qdev, struct sg_table **sgt_out, u64 size) in create_sgt() argument
547 static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent, in qaic_validate_req() argument
657 struct qaic_device *qdev; in qaic_create_bo_ioctl() local
677 qdev = usr->qddev->qdev; in qaic_create_bo_ioctl()
678 qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); in qaic_create_bo_ioctl()
[all …]
qaic.h
48 struct qaic_device *qdev; member
141 struct qaic_device *qdev; member
251 int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor);
257 int qaic_control_open(struct qaic_device *qdev);
258 void qaic_control_close(struct qaic_device *qdev);
259 void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr);
263 int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
264 void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
265 void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id);
266 void release_dbc(struct qaic_device *qdev, u32 dbc_id);
[all …]
/linux-6.6.21/net/qrtr/
smd.c
23 struct qrtr_smd_dev *qdev = dev_get_drvdata(&rpdev->dev); in qcom_smd_qrtr_callback() local
26 if (!qdev) in qcom_smd_qrtr_callback()
29 rc = qrtr_endpoint_post(&qdev->ep, data, len); in qcom_smd_qrtr_callback()
31 dev_err(qdev->dev, "invalid ipcrouter packet\n"); in qcom_smd_qrtr_callback()
42 struct qrtr_smd_dev *qdev = container_of(ep, struct qrtr_smd_dev, ep); in qcom_smd_qrtr_send() local
49 rc = rpmsg_send(qdev->channel, skb->data, skb->len); in qcom_smd_qrtr_send()
61 struct qrtr_smd_dev *qdev; in qcom_smd_qrtr_probe() local
64 qdev = devm_kzalloc(&rpdev->dev, sizeof(*qdev), GFP_KERNEL); in qcom_smd_qrtr_probe()
65 if (!qdev) in qcom_smd_qrtr_probe()
68 qdev->channel = rpdev->ept; in qcom_smd_qrtr_probe()
[all …]
mhi.c
24 struct qrtr_mhi_dev *qdev = dev_get_drvdata(&mhi_dev->dev); in qcom_mhi_qrtr_dl_callback() local
27 if (!qdev || mhi_res->transaction_status) in qcom_mhi_qrtr_dl_callback()
30 rc = qrtr_endpoint_post(&qdev->ep, mhi_res->buf_addr, in qcom_mhi_qrtr_dl_callback()
33 dev_err(qdev->dev, "invalid ipcrouter packet\n"); in qcom_mhi_qrtr_dl_callback()
50 struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep); in qcom_mhi_qrtr_send() local
60 rc = mhi_queue_skb(qdev->mhi_dev, DMA_TO_DEVICE, skb, skb->len, in qcom_mhi_qrtr_send()
78 struct qrtr_mhi_dev *qdev; in qcom_mhi_qrtr_probe() local
81 qdev = devm_kzalloc(&mhi_dev->dev, sizeof(*qdev), GFP_KERNEL); in qcom_mhi_qrtr_probe()
82 if (!qdev) in qcom_mhi_qrtr_probe()
85 qdev->mhi_dev = mhi_dev; in qcom_mhi_qrtr_probe()
[all …]
