/linux-6.6.21/drivers/gpu/drm/i915/gt/

intel_context.h
    22  #define CE_TRACE(ce, fmt, ...) do { \   argument
    23  const struct intel_context *ce__ = (ce); \
    33  void intel_context_init(struct intel_context *ce,
    35  void intel_context_fini(struct intel_context *ce);
    43  int intel_context_alloc_state(struct intel_context *ce);
    45  void intel_context_free(struct intel_context *ce);
    47  int intel_context_reconfigure_sseu(struct intel_context *ce,
    52  static inline bool intel_context_is_child(struct intel_context *ce)   in intel_context_is_child() argument
    54  return !!ce->parallel.parent;   in intel_context_is_child()
    57  static inline bool intel_context_is_parent(struct intel_context *ce)   in intel_context_is_parent() argument
    [all …]

intel_context.c
    26  struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);   in rcu_context_free() local
    28  trace_intel_context_free(ce);   in rcu_context_free()
    29  kmem_cache_free(slab_ce, ce);   in rcu_context_free()
    32  void intel_context_free(struct intel_context *ce)   in intel_context_free() argument
    34  call_rcu(&ce->rcu, rcu_context_free);   in intel_context_free()
    40  struct intel_context *ce;   in intel_context_create() local
    42  ce = intel_context_alloc();   in intel_context_create()
    43  if (!ce)   in intel_context_create()
    46  intel_context_init(ce, engine);   in intel_context_create()
    47  trace_intel_context_create(ce);   in intel_context_create()
    [all …]

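The intel_context.c hits above show the deferred-free path: intel_context_free() hands the object to call_rcu(), and the actual kmem_cache_free() runs in the RCU callback once concurrent readers are guaranteed to be gone. Below is a minimal, hedged sketch of that call_rcu() pattern on an invented type (struct demo_ctx and its helpers are illustrative, not i915 code):

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    /* Hypothetical object; stands in for the real context structure. */
    struct demo_ctx {
            int id;
            struct rcu_head rcu;    /* storage for the deferred-free callback */
    };

    static void demo_ctx_rcu_free(struct rcu_head *rcu)
    {
            struct demo_ctx *ctx = container_of(rcu, typeof(*ctx), rcu);

            kfree(ctx);             /* runs only after an RCU grace period */
    }

    static void demo_ctx_free(struct demo_ctx *ctx)
    {
            /* Readers under rcu_read_lock() may still dereference ctx until
             * the grace period ends, so never free it synchronously here. */
            call_rcu(&ctx->rcu, demo_ctx_rcu_free);
    }
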
intel_lrc.c
    849  const struct intel_context *ce,   in init_common_regs() argument
    865  regs[CTX_TIMESTAMP] = ce->stats.runtime.last;   in init_common_regs()
    929  const struct intel_context *ce,   in __lrc_init_regs() argument
    949  init_common_regs(regs, ce, engine, inhibit);   in __lrc_init_regs()
    950  init_ppgtt_regs(regs, vm_alias(ce->vm));   in __lrc_init_regs()
    957  void lrc_init_regs(const struct intel_context *ce,   in lrc_init_regs() argument
    961  __lrc_init_regs(ce->lrc_reg_state, ce, engine, inhibit);   in lrc_init_regs()
    964  void lrc_reset_regs(const struct intel_context *ce,   in lrc_reset_regs() argument
    967  __reset_stop_ring(ce->lrc_reg_state, engine);   in lrc_reset_regs()
    995  static u32 context_wa_bb_offset(const struct intel_context *ce)   in context_wa_bb_offset() argument
    [all …]

intel_lrc.h
    36  int lrc_alloc(struct intel_context *ce,
    38  void lrc_reset(struct intel_context *ce);
    39  void lrc_fini(struct intel_context *ce);
    43  lrc_pre_pin(struct intel_context *ce,
    48  lrc_pin(struct intel_context *ce,
    51  void lrc_unpin(struct intel_context *ce);
    52  void lrc_post_unpin(struct intel_context *ce);
    54  void lrc_init_state(struct intel_context *ce,
    58  void lrc_init_regs(const struct intel_context *ce,
    61  void lrc_reset_regs(const struct intel_context *ce,
    [all …]

intel_engine_pm.c
    35  static void dbg_poison_ce(struct intel_context *ce)   in dbg_poison_ce() argument
    40  if (ce->state) {   in dbg_poison_ce()
    41  struct drm_i915_gem_object *obj = ce->state->obj;   in dbg_poison_ce()
    42  int type = intel_gt_coherent_map_type(ce->engine->gt, obj, true);   in dbg_poison_ce()
    62  struct intel_context *ce;   in __engine_unpark() local
    69  ce = engine->kernel_context;   in __engine_unpark()
    70  if (ce) {   in __engine_unpark()
    71  GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));   in __engine_unpark()
    74  while (unlikely(intel_context_inflight(ce)))   in __engine_unpark()
    78  dbg_poison_ce(ce);   in __engine_unpark()
    [all …]

intel_breadcrumbs.c
    81  struct intel_context *ce)   in add_signaling_context() argument
    83  lockdep_assert_held(&ce->signal_lock);   in add_signaling_context()
    86  list_add_rcu(&ce->signal_link, &b->signalers);   in add_signaling_context()
    91  struct intel_context *ce)   in remove_signaling_context() argument
    93  lockdep_assert_held(&ce->signal_lock);   in remove_signaling_context()
    95  if (!list_empty(&ce->signals))   in remove_signaling_context()
    99  list_del_rcu(&ce->signal_link);   in remove_signaling_context()
    106  check_signal_order(struct intel_context *ce, struct i915_request *rq)   in check_signal_order() argument
    108  if (rq->context != ce)   in check_signal_order()
    111  if (!list_is_last(&rq->signal_link, &ce->signals) &&   in check_signal_order()
    [all …]

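intel_breadcrumbs.c adds and removes contexts on an RCU-protected signalers list: writers hold ce->signal_lock and use list_add_rcu()/list_del_rcu(), while readers can walk the list under rcu_read_lock() alone. A hedged, generic sketch of that publish/retract idiom with invented names (the real code uses per-context locks and more state):

    #include <linux/list.h>
    #include <linux/rculist.h>
    #include <linux/spinlock.h>

    struct demo_signaler {                  /* hypothetical element type */
            struct list_head link;
    };

    static LIST_HEAD(demo_signalers);       /* shared with lockless readers */
    static DEFINE_SPINLOCK(demo_signalers_lock);

    static void demo_add_signaler(struct demo_signaler *s)
    {
            spin_lock(&demo_signalers_lock);
            list_add_rcu(&s->link, &demo_signalers);  /* publish to readers */
            spin_unlock(&demo_signalers_lock);
    }

    static void demo_remove_signaler(struct demo_signaler *s)
    {
            spin_lock(&demo_signalers_lock);
            list_del_rcu(&s->link);   /* readers may still see s briefly */
            spin_unlock(&demo_signalers_lock);
    }

    static int demo_count_signalers(void)
    {
            struct demo_signaler *s;
            int n = 0;

            rcu_read_lock();
            list_for_each_entry_rcu(s, &demo_signalers, link)
                    n++;              /* read-side walk, no writer lock held */
            rcu_read_unlock();
            return n;
    }
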
intel_context_sseu.c
    17  const struct intel_context *ce,   in gen8_emit_rpcs_config() argument
    27  offset = i915_ggtt_offset(ce->state) +   in gen8_emit_rpcs_config()
    41  gen8_modify_rpcs(struct intel_context *ce, const struct intel_sseu sseu)   in gen8_modify_rpcs() argument
    46  lockdep_assert_held(&ce->pin_mutex);   in gen8_modify_rpcs()
    54  if (!intel_context_pin_if_active(ce))   in gen8_modify_rpcs()
    57  rq = intel_engine_create_kernel_request(ce->engine);   in gen8_modify_rpcs()
    64  ret = intel_context_prepare_remote_request(ce, rq);   in gen8_modify_rpcs()
    66  ret = gen8_emit_rpcs_config(rq, ce, sseu);   in gen8_modify_rpcs()
    70  intel_context_unpin(ce);   in gen8_modify_rpcs()
    75  intel_context_reconfigure_sseu(struct intel_context *ce,   in intel_context_reconfigure_sseu()
    [all …]

selftest_mocs.c
    26  struct intel_context *ce;   in mocs_context_create() local
    28  ce = intel_context_create(engine);   in mocs_context_create()
    29  if (IS_ERR(ce))   in mocs_context_create()
    30  return ce;   in mocs_context_create()
    33  ce->ring_size = SZ_16K;   in mocs_context_create()
    35  return ce;   in mocs_context_create()
    218  struct intel_context *ce)   in check_mocs_engine() argument
    228  rq = intel_context_create_request(ce);   in check_mocs_engine()
    238  if (!err && ce->engine->class == RENDER_CLASS)   in check_mocs_engine()
    250  err = check_mocs_table(ce->engine, arg->mocs, &vaddr);   in check_mocs_engine()
    [all …]

/linux-6.6.21/drivers/crypto/gemini/

sl3516-ce-core.c
    30  static int sl3516_ce_desc_init(struct sl3516_ce_dev *ce)   in sl3516_ce_desc_init() argument
    35  ce->tx = dma_alloc_coherent(ce->dev, sz, &ce->dtx, GFP_KERNEL);   in sl3516_ce_desc_init()
    36  if (!ce->tx)   in sl3516_ce_desc_init()
    38  ce->rx = dma_alloc_coherent(ce->dev, sz, &ce->drx, GFP_KERNEL);   in sl3516_ce_desc_init()
    39  if (!ce->rx)   in sl3516_ce_desc_init()
    43  ce->tx[i].frame_ctrl.bits.own = CE_CPU;   in sl3516_ce_desc_init()
    44  ce->tx[i].next_desc.next_descriptor = ce->dtx + (i + 1) * sizeof(struct descriptor);   in sl3516_ce_desc_init()
    46  ce->tx[MAXDESC - 1].next_desc.next_descriptor = ce->dtx;   in sl3516_ce_desc_init()
    49  ce->rx[i].frame_ctrl.bits.own = CE_CPU;   in sl3516_ce_desc_init()
    50  ce->rx[i].next_desc.next_descriptor = ce->drx + (i + 1) * sizeof(struct descriptor);   in sl3516_ce_desc_init()
    [all …]

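sl3516_ce_desc_init() allocates the TX and RX descriptor rings from coherent DMA memory and chains each descriptor's next pointer to the bus address of the following slot, wrapping the last entry back to the first. A hedged sketch of that circular-ring setup with an invented descriptor layout (the real struct descriptor and its fields differ); dma_free_coherent() with the same size and handle undoes the allocation:

    #include <linux/dma-mapping.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    #define DEMO_NDESC      32

    struct demo_desc {              /* invented layout, for illustration only */
            __le32 ctrl;
            __le32 next;            /* bus address of the next descriptor */
    };

    static int demo_ring_init(struct device *dev, struct demo_desc **ring,
                              dma_addr_t *ring_dma)
    {
            size_t sz = sizeof(struct demo_desc) * DEMO_NDESC;
            int i;

            /* Coherent memory: CPU and engine see updates without syncing. */
            *ring = dma_alloc_coherent(dev, sz, ring_dma, GFP_KERNEL);
            if (!*ring)
                    return -ENOMEM;

            for (i = 0; i < DEMO_NDESC - 1; i++)
                    (*ring)[i].next = cpu_to_le32(*ring_dma +
                                    (i + 1) * sizeof(struct demo_desc));
            /* Close the ring: the last descriptor points back at the first. */
            (*ring)[DEMO_NDESC - 1].next = cpu_to_le32(*ring_dma);

            return 0;
    }
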
sl3516-ce-rng.c
    15  struct sl3516_ce_dev *ce;   in sl3516_ce_rng_read() local
    20  ce = container_of(rng, struct sl3516_ce_dev, trng);   in sl3516_ce_rng_read()
    23  ce->hwrng_stat_req++;   in sl3516_ce_rng_read()
    24  ce->hwrng_stat_bytes += max;   in sl3516_ce_rng_read()
    27  err = pm_runtime_get_sync(ce->dev);   in sl3516_ce_rng_read()
    29  pm_runtime_put_noidle(ce->dev);   in sl3516_ce_rng_read()
    34  *data = readl(ce->base + IPSEC_RAND_NUM_REG);   in sl3516_ce_rng_read()
    39  pm_runtime_put(ce->dev);   in sl3516_ce_rng_read()
    44  int sl3516_ce_rng_register(struct sl3516_ce_dev *ce)   in sl3516_ce_rng_register() argument
    48  ce->trng.name = "SL3516 Crypto Engine RNG";   in sl3516_ce_rng_register()
    [all …]

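sl3516_ce_rng_read() is the driver's hwrng ->read() hook: take a runtime-PM reference, read 32-bit words from the RNG data register, drop the reference, and return the number of bytes produced. A hedged sketch of that hook shape; the device structure and register offset below are invented, not the SL3516's:

    #include <linux/hw_random.h>
    #include <linux/io.h>
    #include <linux/kernel.h>
    #include <linux/pm_runtime.h>

    struct demo_rng_dev {                   /* invented container */
            void __iomem *base;
            struct device *dev;
            struct hwrng rng;
    };

    static int demo_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
    {
            struct demo_rng_dev *d = container_of(rng, struct demo_rng_dev, rng);
            u32 *buf = data;
            size_t i, words = max / sizeof(u32);
            int err;

            err = pm_runtime_resume_and_get(d->dev);   /* power the block up */
            if (err < 0)
                    return err;

            for (i = 0; i < words; i++)
                    buf[i] = readl(d->base + 0x0);     /* invented data register */

            pm_runtime_put(d->dev);
            return words * sizeof(u32);                /* bytes delivered */
    }
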
sl3516-ce-cipher.c
    29  struct sl3516_ce_dev *ce = op->ce;   in sl3516_ce_need_fallback() local
    35  ce->fallback_mod16++;   in sl3516_ce_need_fallback()
    44  ce->fallback_sg_count_tx++;   in sl3516_ce_need_fallback()
    49  ce->fallback_sg_count_rx++;   in sl3516_ce_need_fallback()
    56  ce->fallback_mod16++;   in sl3516_ce_need_fallback()
    60  ce->fallback_mod16++;   in sl3516_ce_need_fallback()
    64  ce->fallback_align16++;   in sl3516_ce_need_fallback()
    72  ce->fallback_mod16++;   in sl3516_ce_need_fallback()
    76  ce->fallback_mod16++;   in sl3516_ce_need_fallback()
    80  ce->fallback_align16++;   in sl3516_ce_need_fallback()
    [all …]

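sl3516_ce_need_fallback() decides whether to punt a request to the software fallback by walking the scatterlists and rejecting segments whose length or alignment the DMA engine cannot handle, bumping a debugfs counter for each reason. A hedged sketch of that style of scatterlist check; the 16-byte rule is an assumption for illustration, not the driver's exact policy:

    #include <linux/align.h>
    #include <linux/scatterlist.h>

    /* Return true if any segment breaks an (assumed) requirement that
     * segments be 16-byte aligned and a multiple of 16 bytes long. */
    static bool demo_need_fallback(struct scatterlist *sgl)
    {
            struct scatterlist *sg;

            for (sg = sgl; sg; sg = sg_next(sg)) {
                    if (sg->length % 16)
                            return true;    /* length not a multiple of 16 */
                    if (!IS_ALIGNED(sg->offset, 16))
                            return true;    /* segment badly aligned */
            }
            return false;
    }
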
/linux-6.6.21/drivers/gpu/drm/i915/gt/uc/

intel_guc_submission.c
    180  static inline void init_sched_state(struct intel_context *ce)   in init_sched_state() argument
    182  lockdep_assert_held(&ce->guc_state.lock);   in init_sched_state()
    183  ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;   in init_sched_state()
    197  static bool sched_state_is_init(struct intel_context *ce)   in sched_state_is_init() argument
    199  return !(ce->guc_state.sched_state & ~SCHED_STATE_VALID_INIT);   in sched_state_is_init()
    203  context_wait_for_deregister_to_register(struct intel_context *ce)   in context_wait_for_deregister_to_register() argument
    205  return ce->guc_state.sched_state &   in context_wait_for_deregister_to_register()
    210  set_context_wait_for_deregister_to_register(struct intel_context *ce)   in set_context_wait_for_deregister_to_register() argument
    212  lockdep_assert_held(&ce->guc_state.lock);   in set_context_wait_for_deregister_to_register()
    213  ce->guc_state.sched_state |=   in set_context_wait_for_deregister_to_register()
    [all …]

selftest_guc.c
    23  static struct i915_request *nop_user_request(struct intel_context *ce,   in nop_user_request() argument
    29  rq = intel_context_create_request(ce);   in nop_user_request()
    57  struct intel_context *ce;   in intel_guc_scrub_ctbs() local
    67  ce = intel_context_create(engine);   in intel_guc_scrub_ctbs()
    68  if (IS_ERR(ce)) {   in intel_guc_scrub_ctbs()
    69  ret = PTR_ERR(ce);   in intel_guc_scrub_ctbs()
    70  gt_err(gt, "Failed to create context %d: %pe\n", i, ce);   in intel_guc_scrub_ctbs()
    76  ce->drop_schedule_enable = true;   in intel_guc_scrub_ctbs()
    79  ce->drop_schedule_disable = true;   in intel_guc_scrub_ctbs()
    82  ce->drop_deregister = true;   in intel_guc_scrub_ctbs()
    [all …]

/linux-6.6.21/arch/arm64/crypto/

Makefile
    8   obj-$(CONFIG_CRYPTO_SHA1_ARM64_CE) += sha1-ce.o
    9   sha1-ce-y := sha1-ce-glue.o sha1-ce-core.o
    11  obj-$(CONFIG_CRYPTO_SHA2_ARM64_CE) += sha2-ce.o
    12  sha2-ce-y := sha2-ce-glue.o sha2-ce-core.o
    14  obj-$(CONFIG_CRYPTO_SHA512_ARM64_CE) += sha512-ce.o
    15  sha512-ce-y := sha512-ce-glue.o sha512-ce-core.o
    17  obj-$(CONFIG_CRYPTO_SHA3_ARM64) += sha3-ce.o
    18  sha3-ce-y := sha3-ce-glue.o sha3-ce-core.o
    23  obj-$(CONFIG_CRYPTO_SM3_ARM64_CE) += sm3-ce.o
    24  sm3-ce-y := sm3-ce-glue.o sm3-ce-core.o
    [all …]

/linux-6.6.21/drivers/crypto/allwinner/sun8i-ce/

sun8i-ce-core.c
    153  int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce)   in sun8i_ce_get_engine_number() argument
    155  return atomic_inc_return(&ce->flow) % (MAXFLOW - 1);   in sun8i_ce_get_engine_number()
    158  int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)   in sun8i_ce_run_task() argument
    162  struct ce_task *cet = ce->chanlist[flow].tl;   in sun8i_ce_run_task()
    165  ce->chanlist[flow].stat_req++;   in sun8i_ce_run_task()
    168  mutex_lock(&ce->mlock);   in sun8i_ce_run_task()
    170  v = readl(ce->base + CE_ICR);   in sun8i_ce_run_task()
    172  writel(v, ce->base + CE_ICR);   in sun8i_ce_run_task()
    174  reinit_completion(&ce->chanlist[flow].complete);   in sun8i_ce_run_task()
    175  writel(ce->chanlist[flow].t_phy, ce->base + CE_TDQ);   in sun8i_ce_run_task()
    [all …]

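sun8i_ce_run_task() takes the engine mutex, re-arms the flow's completion, writes the task descriptor's physical address into the task-queue register and then, further down in the file, waits for the interrupt handler to signal completion. A hedged, generic sketch of that submit-and-wait shape; the register offsets, doorbell value and timeout are invented:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/jiffies.h>
    #include <linux/kernel.h>
    #include <linux/mutex.h>

    struct demo_engine {                    /* invented engine handle */
            void __iomem *base;
            struct mutex lock;              /* serializes hardware access */
            struct completion done;         /* completed by the IRQ handler */
            dma_addr_t task_phys;
    };

    static int demo_engine_run(struct demo_engine *e)
    {
            int ret = 0;

            mutex_lock(&e->lock);

            reinit_completion(&e->done);
            writel(lower_32_bits(e->task_phys), e->base + 0x0); /* invented queue reg */
            writel(1, e->base + 0x4);                           /* invented doorbell */

            if (!wait_for_completion_timeout(&e->done, msecs_to_jiffies(1000)))
                    ret = -ETIMEDOUT;   /* IRQ never called complete(&e->done) */

            mutex_unlock(&e->lock);
            return ret;
    }
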
sun8i-ce-trng.c
    25  struct sun8i_ce_dev *ce;   in sun8i_ce_trng_read() local
    35  ce = container_of(rng, struct sun8i_ce_dev, trng);   in sun8i_ce_trng_read()
    46  ce->hwrng_stat_req++;   in sun8i_ce_trng_read()
    47  ce->hwrng_stat_bytes += todo;   in sun8i_ce_trng_read()
    50  dma_dst = dma_map_single(ce->dev, d, todo, DMA_FROM_DEVICE);   in sun8i_ce_trng_read()
    51  if (dma_mapping_error(ce->dev, dma_dst)) {   in sun8i_ce_trng_read()
    52  dev_err(ce->dev, "Cannot DMA MAP DST\n");   in sun8i_ce_trng_read()
    57  err = pm_runtime_resume_and_get(ce->dev);   in sun8i_ce_trng_read()
    61  mutex_lock(&ce->rnglock);   in sun8i_ce_trng_read()
    62  chan = &ce->chanlist[flow];   in sun8i_ce_trng_read()
    [all …]

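sun8i_ce_trng_read() maps a temporary buffer for a device-to-memory transfer with dma_map_single() and validates the handle with dma_mapping_error() before programming the engine. A short, hedged sketch of that streaming-DMA pattern with the hardware step stubbed out:

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    /* Map a freshly allocated buffer for a device write, then unmap it.
     * Purely illustrative: no real hardware is programmed in between. */
    static int demo_dma_fill(struct device *dev, size_t len)
    {
            dma_addr_t dst;
            void *buf;
            int ret = 0;

            buf = kzalloc(len, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            dst = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, dst)) {
                    ret = -EFAULT;
                    goto free;
            }

            /* ... hand 'dst' to the device and wait for it to finish ... */

            dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
    free:
            kfree(buf);
            return ret;
    }
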
sun8i-ce-prng.c
    61   struct sun8i_ce_dev *ce;   in sun8i_ce_prng_generate() local
    72   ce = algt->ce;   in sun8i_ce_prng_generate()
    75   dev_err(ce->dev, "not seeded\n");   in sun8i_ce_prng_generate()
    89   dev_dbg(ce->dev, "%s PRNG slen=%u dlen=%u todo=%u multi=%u\n", __func__,   in sun8i_ce_prng_generate()
    97   dma_iv = dma_map_single(ce->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);   in sun8i_ce_prng_generate()
    98   if (dma_mapping_error(ce->dev, dma_iv)) {   in sun8i_ce_prng_generate()
    99   dev_err(ce->dev, "Cannot DMA MAP IV\n");   in sun8i_ce_prng_generate()
    104  dma_dst = dma_map_single(ce->dev, d, todo, DMA_FROM_DEVICE);   in sun8i_ce_prng_generate()
    105  if (dma_mapping_error(ce->dev, dma_dst)) {   in sun8i_ce_prng_generate()
    106  dev_err(ce->dev, "Cannot DMA MAP DST\n");   in sun8i_ce_prng_generate()
    [all …]

/linux-6.6.21/fs/smb/client/

dfs_cache.c
    108  static inline bool cache_entry_expired(const struct cache_entry *ce)   in cache_entry_expired() argument
    113  return timespec64_compare(&ts, &ce->etime) >= 0;   in cache_entry_expired()
    116  static inline void free_tgts(struct cache_entry *ce)   in free_tgts() argument
    120  list_for_each_entry_safe(t, n, &ce->tlist, list) {   in free_tgts()
    127  static inline void flush_cache_ent(struct cache_entry *ce)   in flush_cache_ent() argument
    129  hlist_del_init(&ce->hlist);   in flush_cache_ent()
    130  kfree(ce->path);   in flush_cache_ent()
    131  free_tgts(ce);   in flush_cache_ent()
    133  kmem_cache_free(cache_slab, ce);   in flush_cache_ent()
    143  struct cache_entry *ce;   in flush_cache_ents() local
    [all …]

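cache_entry_expired() compares the current coarse real time against the entry's absolute expiry timestamp. A hedged sketch of that check on an invented entry type (the real struct cache_entry carries far more state):

    #include <linux/time64.h>
    #include <linux/timekeeping.h>

    struct demo_cache_entry {               /* invented, for illustration */
            struct timespec64 etime;        /* absolute expiry time */
    };

    static bool demo_entry_expired(const struct demo_cache_entry *e)
    {
            struct timespec64 now;

            ktime_get_coarse_real_ts64(&now);  /* cheap, tick-granularity clock */
            return timespec64_compare(&now, &e->etime) >= 0;
    }
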
/linux-6.6.21/drivers/base/power/

clock_ops.c
    147  static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)   in __pm_clk_enable() argument
    151  switch (ce->status) {   in __pm_clk_enable()
    153  ret = clk_prepare_enable(ce->clk);   in __pm_clk_enable()
    156  ret = clk_enable(ce->clk);   in __pm_clk_enable()
    162  ce->status = PCE_STATUS_ENABLED;   in __pm_clk_enable()
    165  __func__, ce->clk, ret);   in __pm_clk_enable()
    173  static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)   in pm_clk_acquire() argument
    175  if (!ce->clk)   in pm_clk_acquire()
    176  ce->clk = clk_get(dev, ce->con_id);   in pm_clk_acquire()
    177  if (IS_ERR(ce->clk)) {   in pm_clk_acquire()
    [all …]

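__pm_clk_enable() drives a per-device clock through the prepare/enable states it tracks in ce->status. Outside the PM core, the consumer-side equivalent is simply clk_get() plus clk_prepare_enable() and the matching teardown. A hedged consumer-side sketch; the "bus" con_id is an assumption:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    /* Grab and start a named clock for 'dev'; "bus" is an invented con_id. */
    static struct clk *demo_clock_start(struct device *dev)
    {
            struct clk *clk;
            int ret;

            clk = clk_get(dev, "bus");
            if (IS_ERR(clk))
                    return clk;

            ret = clk_prepare_enable(clk);  /* prepare + enable in one call */
            if (ret) {
                    clk_put(clk);
                    return ERR_PTR(ret);
            }
            return clk;
    }

    static void demo_clock_stop(struct clk *clk)
    {
            clk_disable_unprepare(clk);
            clk_put(clk);
    }
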
/linux-6.6.21/drivers/clocksource/

mps2-timer.c
    54  static int mps2_timer_shutdown(struct clock_event_device *ce)   in mps2_timer_shutdown() argument
    56  clockevent_mps2_writel(0, ce, TIMER_RELOAD);   in mps2_timer_shutdown()
    57  clockevent_mps2_writel(0, ce, TIMER_CTRL);   in mps2_timer_shutdown()
    62  static int mps2_timer_set_next_event(unsigned long next, struct clock_event_device *ce)   in mps2_timer_set_next_event() argument
    64  clockevent_mps2_writel(next, ce, TIMER_VALUE);   in mps2_timer_set_next_event()
    65  clockevent_mps2_writel(TIMER_CTRL_IE | TIMER_CTRL_ENABLE, ce, TIMER_CTRL);   in mps2_timer_set_next_event()
    70  static int mps2_timer_set_periodic(struct clock_event_device *ce)   in mps2_timer_set_periodic() argument
    72  u32 clock_count_per_tick = to_mps2_clkevt(ce)->clock_count_per_tick;   in mps2_timer_set_periodic()
    74  clockevent_mps2_writel(clock_count_per_tick, ce, TIMER_RELOAD);   in mps2_timer_set_periodic()
    75  clockevent_mps2_writel(clock_count_per_tick, ce, TIMER_VALUE);   in mps2_timer_set_periodic()
    [all …]

timer-digicolor.c
    58  struct clock_event_device ce;   member
    64  static struct digicolor_timer *dc_timer(struct clock_event_device *ce)   in dc_timer() argument
    66  return container_of(ce, struct digicolor_timer, ce);   in dc_timer()
    69  static inline void dc_timer_disable(struct clock_event_device *ce)   in dc_timer_disable() argument
    71  struct digicolor_timer *dt = dc_timer(ce);   in dc_timer_disable()
    75  static inline void dc_timer_enable(struct clock_event_device *ce, u32 mode)   in dc_timer_enable() argument
    77  struct digicolor_timer *dt = dc_timer(ce);   in dc_timer_enable()
    81  static inline void dc_timer_set_count(struct clock_event_device *ce,   in dc_timer_set_count() argument
    84  struct digicolor_timer *dt = dc_timer(ce);   in dc_timer_set_count()
    88  static int digicolor_clkevt_shutdown(struct clock_event_device *ce)   in digicolor_clkevt_shutdown() argument
    [all …]

timer-sun5i.c
    58  static void sun5i_clkevt_sync(struct sun5i_timer *ce)   in sun5i_clkevt_sync() argument
    60  u32 old = readl(ce->base + TIMER_CNTVAL_LO_REG(1));   in sun5i_clkevt_sync()
    62  while ((old - readl(ce->base + TIMER_CNTVAL_LO_REG(1))) < TIMER_SYNC_TICKS)   in sun5i_clkevt_sync()
    66  static void sun5i_clkevt_time_stop(struct sun5i_timer *ce, u8 timer)   in sun5i_clkevt_time_stop() argument
    68  u32 val = readl(ce->base + TIMER_CTL_REG(timer));   in sun5i_clkevt_time_stop()
    69  writel(val & ~TIMER_CTL_ENABLE, ce->base + TIMER_CTL_REG(timer));   in sun5i_clkevt_time_stop()
    71  sun5i_clkevt_sync(ce);   in sun5i_clkevt_time_stop()
    74  static void sun5i_clkevt_time_setup(struct sun5i_timer *ce, u8 timer, u32 delay)   in sun5i_clkevt_time_setup() argument
    76  writel(delay, ce->base + TIMER_INTVAL_LO_REG(timer));   in sun5i_clkevt_time_setup()
    79  static void sun5i_clkevt_time_start(struct sun5i_timer *ce, u8 timer, bool periodic)   in sun5i_clkevt_time_start() argument
    [all …]

timer-rockchip.c
    43   struct clock_event_device ce;   member
    50   static inline struct rk_timer *rk_timer(struct clock_event_device *ce)   in rk_timer() argument
    52   return &container_of(ce, struct rk_clkevt, ce)->timer;   in rk_timer()
    78   struct clock_event_device *ce)   in rk_timer_set_next_event() argument
    80   struct rk_timer *timer = rk_timer(ce);   in rk_timer_set_next_event()
    89   static int rk_timer_shutdown(struct clock_event_device *ce)   in rk_timer_shutdown() argument
    91   struct rk_timer *timer = rk_timer(ce);   in rk_timer_shutdown()
    97   static int rk_timer_set_periodic(struct clock_event_device *ce)   in rk_timer_set_periodic() argument
    99   struct rk_timer *timer = rk_timer(ce);   in rk_timer_set_periodic()
    109  struct clock_event_device *ce = dev_id;   in rk_timer_interrupt() local
    [all …]

/linux-6.6.21/drivers/of/

dynamic.c
    519  static void __of_changeset_entry_destroy(struct of_changeset_entry *ce)   in __of_changeset_entry_destroy() argument
    521  if (ce->action == OF_RECONFIG_ATTACH_NODE &&   in __of_changeset_entry_destroy()
    522  of_node_check_flag(ce->np, OF_OVERLAY)) {   in __of_changeset_entry_destroy()
    523  if (kref_read(&ce->np->kobj.kref) > 1) {   in __of_changeset_entry_destroy()
    525  kref_read(&ce->np->kobj.kref), ce->np);   in __of_changeset_entry_destroy()
    527  of_node_set_flag(ce->np, OF_OVERLAY_FREE_CSET);   in __of_changeset_entry_destroy()
    531  of_node_put(ce->np);   in __of_changeset_entry_destroy()
    532  list_del(&ce->node);   in __of_changeset_entry_destroy()
    533  kfree(ce);   in __of_changeset_entry_destroy()
    536  static void __of_changeset_entry_invert(struct of_changeset_entry *ce,   in __of_changeset_entry_invert() argument
    [all …]

/linux-6.6.21/arch/sparc/kernel/

time_32.c
    120  struct clock_event_device *ce = &timer_ce;   in setup_timer_ce() local
    124  ce->name = "timer_ce";   in setup_timer_ce()
    125  ce->rating = 100;   in setup_timer_ce()
    126  ce->features = CLOCK_EVT_FEAT_PERIODIC;   in setup_timer_ce()
    127  ce->set_state_shutdown = timer_ce_shutdown;   in setup_timer_ce()
    128  ce->set_state_periodic = timer_ce_set_periodic;   in setup_timer_ce()
    129  ce->tick_resume = timer_ce_set_periodic;   in setup_timer_ce()
    130  ce->cpumask = cpu_possible_mask;   in setup_timer_ce()
    131  ce->shift = 32;   in setup_timer_ce()
    132  ce->mult = div_sc(sparc_config.clock_rate, NSEC_PER_SEC,   in setup_timer_ce()
    [all …]

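setup_timer_ce() fills in a clock_event_device by hand, while the clocksource drivers above embed the same structure in a driver-private struct so callbacks can recover their state with container_of(). A hedged sketch combining both idioms, with the hardware programming stubbed out; the name, rating and 32-bit counter width are assumptions:

    #include <linux/clockchips.h>
    #include <linux/cpumask.h>
    #include <linux/io.h>
    #include <linux/kernel.h>

    struct demo_timer {                     /* invented driver state */
            void __iomem *base;
            struct clock_event_device ce;   /* embedded, as in the drivers above */
    };

    static int demo_set_next_event(unsigned long ticks, struct clock_event_device *ce)
    {
            struct demo_timer *t = container_of(ce, struct demo_timer, ce);

            /* program the counter at t->base with 'ticks' here */
            (void)t;
            return 0;
    }

    static int demo_shutdown(struct clock_event_device *ce)
    {
            /* stop the counter and mask its interrupt here */
            return 0;
    }

    static void demo_timer_register(struct demo_timer *t, u32 rate_hz)
    {
            t->ce.name = "demo-timer";
            t->ce.rating = 200;
            t->ce.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
            t->ce.set_next_event = demo_set_next_event;
            t->ce.set_state_shutdown = demo_shutdown;
            t->ce.cpumask = cpu_possible_mask;

            /* min/max delta are in timer ticks; 0xffffffff assumes a 32-bit counter */
            clockevents_config_and_register(&t->ce, rate_hz, 1, 0xffffffff);
    }
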