/linux-6.1.9/drivers/net/ethernet/chelsio/cxgb4/ |
D | l2t.c |
  65  static inline unsigned int vlan_prio(const struct l2t_entry *e)
  67          return e->vlan >> VLAN_PRIO_SHIFT;
  70  static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
  72          if (atomic_add_return(1, &e->refcnt) == 1)  /* 0 -> 1 transition */
 118  static int addreq(const struct l2t_entry *e, const u32 *addr)
 120          if (e->v6)
 121                  return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
 122                         (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
 123          return e->addr[0] ^ addr[0];
 126  static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
[all …]
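The addreq() comparison above relies on a branch-free idiom: XOR of two equal words is zero, so OR-ing the per-word XORs is zero exactly when the whole address matches. A minimal standalone sketch of the same idiom (hypothetical helper, not part of the driver):

#include <stdint.h>

/* Returns nonzero iff any of the nwords words differ. */
static int words_differ(const uint32_t *a, const uint32_t *b, int nwords)
{
        uint32_t diff = 0;
        int i;

        for (i = 0; i < nwords; i++)
                diff |= a[i] ^ b[i];    /* accumulate differing bits */
        return diff != 0;
}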
|
D | smt.c |
  68          struct smt_entry *e, *end;  /* in find_or_alloc_smte() */
  70          for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
  71                  if (e->refcnt == 0) {
  73                          first_free = e;
  75                  if (e->state == SMT_STATE_SWITCHING) {
  79                          if (memcmp(e->src_mac, smac, ETH_ALEN) == 0)
  86          e = first_free;
  92          e->state = SMT_STATE_UNUSED;
  95          return e;
  98  static void t4_smte_free(struct smt_entry *e)
[all …]
|
D | sched.c |
  47          struct sched_class *e;  /* in t4_sched_class_fw_cmd() */
  50          e = &s->tab[p->u.params.class];
  58                          p->u.params.channel, e->idx,
 125          struct sched_class *e, *end;  /* in t4_sched_entry_lookup() */
 130          for (e = &s->tab[0]; e != end; ++e) {
 131                  if (e->state == SCHED_STATE_UNUSED ||
 132                      e->bind_type != type)
 139                  list_for_each_entry(qe, &e->entry_list, list) {
 150                  list_for_each_entry(fe, &e->entry_list, list) {
 190          struct sched_class *e;  /* in t4_sched_queue_unbind() */
[all …]
|
/linux-6.1.9/drivers/net/ethernet/chelsio/cxgb3/ |
D | l2t.c |
  63  static inline unsigned int vlan_prio(const struct l2t_entry *e)
  65          return e->vlan >> 13;
  74  static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
  77          if (e->neigh)
  78                  neigh_release(e->neigh);
  79          e->neigh = n;
  88                                  struct l2t_entry *e)  /* in setup_l2e_send_pending() */
 101          OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
 102          req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
 103                              V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
[all …]
|
/linux-6.1.9/tools/testing/selftests/powerpc/pmu/ |
D | event.c |
  24  static void __event_init_opts(struct event *e, u64 config,
  27          memset(e, 0, sizeof(*e));
  29          e->name = name;
  31          e->attr.type = type;
  32          e->attr.config = config;
  33          e->attr.size = sizeof(e->attr);
  35          e->attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | \
  38          e->attr.sample_period = 1000;
  39          e->attr.sample_type = PERF_SAMPLE_REGS_INTR;
  40          e->attr.disabled = 1;
[all …]
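For context, a perf_event_attr initialized this way is ultimately handed to the perf_event_open() syscall. A minimal userspace sketch, assuming a Linux host with perf support (the helper name is hypothetical; the selftests use their own event_open() wrappers):

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Open a counter for the calling task on any CPU; returns an fd or -1. */
static int open_counter(struct perf_event_attr *attr)
{
        /* pid = 0 (this task), cpu = -1 (any), group_fd = -1, flags = 0 */
        return syscall(__NR_perf_event_open, attr, 0, -1, -1, 0);
}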
|
/linux-6.1.9/scripts/kconfig/ |
D | expr.c |
  16  static struct expr *expr_eliminate_yn(struct expr *e);
  20          struct expr *e = xcalloc(1, sizeof(*e));  /* in expr_alloc_symbol() */
  21          e->type = E_SYMBOL;
  22          e->left.sym = sym;
  23          return e;
  28          struct expr *e = xcalloc(1, sizeof(*e));  /* in expr_alloc_one() */
  29          e->type = type;
  30          e->left.expr = ce;
  31          return e;
  36          struct expr *e = xcalloc(1, sizeof(*e));  /* in expr_alloc_two() */
[all …]
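Judging by the two allocators shown in full, the truncated expr_alloc_two() presumably completes the same zero-allocate-and-fill pattern with both operand slots. A sketch under that assumption (not quoted from the file):

struct expr *expr_alloc_two(enum expr_type type,
                            struct expr *e1, struct expr *e2)
{
        struct expr *e = xcalloc(1, sizeof(*e));

        e->type = type;
        e->left.expr = e1;      /* left operand */
        e->right.expr = e2;     /* right operand */
        return e;
}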
|
/linux-6.1.9/drivers/media/test-drivers/vidtv/ |
D | vidtv_s302m.c |
 165  static void vidtv_s302m_access_unit_destroy(struct vidtv_encoder *e)
 167          struct vidtv_access_unit *head = e->access_units;
 176          e->access_units = NULL;
 179  static void vidtv_s302m_alloc_au(struct vidtv_encoder *e)
 184          if (e->sync && e->sync->is_video_encoder) {
 185                  sync_au = e->sync->access_units;
 188                  temp = vidtv_s302m_access_unit_init(e->access_units);
 189                  if (!e->access_units)
 190                          e->access_units = temp;
 198          e->access_units = vidtv_s302m_access_unit_init(NULL);
[all …]
|
/linux-6.1.9/fs/ |
D | binfmt_misc.c |
  97          Node *e = list_entry(l, Node, list);  /* in check_file() */
 102          if (!test_bit(Enabled, &e->flags))
 106          if (!test_bit(Magic, &e->flags)) {
 107                  if (p && !strcmp(e->magic, p + 1))
 108                          return e;
 113          s = bprm->buf + e->offset;
 114          if (e->mask) {
 115                  for (j = 0; j < e->size; j++)
 116                          if ((*s++ ^ e->magic[j]) & e->mask[j])
 119                  for (j = 0; j < e->size; j++)
[all …]
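The inner loop is masked magic matching: a buffer byte matches when it equals the registered magic byte in every bit position selected by the mask. A standalone sketch of the same test (hypothetical helper):

#include <stddef.h>

/* Returns 1 if buf matches magic under mask for all size bytes. */
static int magic_matches(const unsigned char *buf, const unsigned char *magic,
                         const unsigned char *mask, size_t size)
{
        size_t j;

        for (j = 0; j < size; j++)
                if ((buf[j] ^ magic[j]) & mask[j])
                        return 0;       /* mismatch in a significant bit */
        return 1;
}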
|
/linux-6.1.9/security/apparmor/ |
D | policy_unpack_test.c |
  47          struct aa_ext *e;  /* struct member */
  55          struct aa_ext *e;  /* in build_aa_ext_struct() */
  60          e = kunit_kmalloc(test, sizeof(*e), GFP_USER);
  61          KUNIT_EXPECT_NOT_ERR_OR_NULL(test, e);
  63          e->start = buf;
  64          e->end = e->start + buf_size;
  65          e->pos = e->start;
  71          buf = e->start + TEST_STRING_BUF_OFFSET;
  76          buf = e->start + TEST_NAMED_U32_BUF_OFFSET;
  83          buf = e->start + TEST_NAMED_U64_BUF_OFFSET;
[all …]
|
D | policy_unpack.c |
 106                          const char *name, const char *info, struct aa_ext *e,  /* in audit_iface() */
 111          if (e)
 112                  aad(&sa)->iface.pos = e->pos - e->start;
 202  static bool inbounds(struct aa_ext *e, size_t size)
 204          return (size <= e->end - e->pos);
 223  static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
 226          void *pos = e->pos;
 228          if (!inbounds(e, sizeof(u16)))
 230          size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
 231          e->pos += sizeof(__le16);
[all …]
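unpack_u16_chunk() shows the standard pattern for consuming a length-prefixed blob safely: bounds-check the u16 header against the cursor, read it unaligned, then advance past header and payload. A standalone sketch with simplified, hypothetical types (assumes a little-endian host; the kernel code uses get_unaligned()/le16_to_cpu() instead):

#include <stdint.h>
#include <string.h>
#include <stddef.h>

struct ext { unsigned char *pos, *end; };

static size_t unpack_chunk(struct ext *e, unsigned char **chunk)
{
        uint16_t size;

        if ((size_t)(e->end - e->pos) < sizeof(size))
                return 0;                       /* header out of bounds */
        memcpy(&size, e->pos, sizeof(size));    /* unaligned-safe read */
        if ((size_t)(e->end - (e->pos + sizeof(size))) < size)
                return 0;                       /* payload out of bounds */
        e->pos += sizeof(size);
        *chunk = e->pos;                        /* payload starts here */
        e->pos += size;
        return size;
}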
|
/linux-6.1.9/tools/testing/selftests/powerpc/pmu/ebb/ |
D | trace.c |
  78          struct trace_entry *e;  /* in trace_alloc_entry() */
  80          e = trace_alloc(tb, sizeof(*e) + payload_size);
  81          if (e)
  82                  e->length = payload_size;
  84          return e;
  89          struct trace_entry *e;  /* in trace_log_reg() */
  92          e = trace_alloc_entry(tb, sizeof(reg) + sizeof(value));
  93          if (!e)
  96          e->type = TRACE_TYPE_REG;
  97          p = (u64 *)e->data;
[all …]
|
/linux-6.1.9/drivers/md/ |
D | dm-cache-policy-smq.c |
  87          struct entry *e;  /* in __get_entry() */
  89          e = es->begin + block;
  90          BUG_ON(e >= es->end);
  92          return e;
  95  static unsigned to_index(struct entry_space *es, struct entry *e)
  97          BUG_ON(e < es->begin || e >= es->end);
  98          return e - es->begin;
 132  static struct entry *l_next(struct entry_space *es, struct entry *e)
 134          return to_entry(es, e->next);
 137  static struct entry *l_prev(struct entry_space *es, struct entry *e)
[all …]
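__get_entry() and to_index() convert between indices and pointers purely by pointer arithmetic over one contiguous array, which lets the lists above store compact indices instead of pointers. A standalone sketch with hypothetical struct names mirroring entry_space:

#include <assert.h>

struct entry { unsigned next; };                /* next stored as an index */
struct entry_space { struct entry *begin, *end; };

static struct entry *index_to_entry(struct entry_space *es, unsigned block)
{
        struct entry *e = es->begin + block;    /* index -> pointer */

        assert(e < es->end);
        return e;
}

static unsigned entry_to_index(struct entry_space *es, struct entry *e)
{
        assert(e >= es->begin && e < es->end);
        return e - es->begin;                   /* pointer difference = index */
}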
|
/linux-6.1.9/block/ |
D | elevator.c |
  63          struct elevator_queue *e = q->elevator;  /* in elv_iosched_allow_bio_merge() */
  65          if (e->type->ops.allow_merge)
  66                  return e->type->ops.allow_merge(q, rq, bio);
 101  static bool elevator_match(const struct elevator_type *e, const char *name,
 104          if (!elv_support_features(e->elevator_features, required_features))
 106          if (!strcmp(e->elevator_name, name))
 108          if (e->elevator_alias && !strcmp(e->elevator_alias, name))
 125          struct elevator_type *e;  /* in elevator_find() */
 127          list_for_each_entry(e, &elv_list, list) {
 128                  if (elevator_match(e, name, required_features))
[all …]
|
/linux-6.1.9/lib/ |
D | lru_cache.c |
  39  #define PARANOIA_LC_ELEMENT(lc, e) do { \
  41          struct lc_element *e_ = (e); \
  95          struct lc_element *e;  /* in lc_create() */
 139                  e = p + e_off;
 140                  e->lc_index = i;
 141                  e->lc_number = LC_FREE;
 142                  e->lc_new_number = LC_FREE;
 143                  list_add(&e->list, &lc->free);
 144                  element[i] = e;
 213          struct lc_element *e = lc->lc_element[i];  /* in lc_reset() */
[all …]
|
/linux-6.1.9/drivers/mtd/ubi/ |
D | wl.c |
 127                          struct ubi_wl_entry *e, struct rb_root *root);
 129                          struct ubi_wl_entry *e);
 139  static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
 150          if (e->ec < e1->ec)
 152          else if (e->ec > e1->ec)
 155                  ubi_assert(e->pnum != e1->pnum);
 156                  if (e->pnum < e1->pnum)
 163          rb_link_node(&e->u.rb, parent, p);
 164          rb_insert_color(&e->u.rb, root);
 175  static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
[all …]
|
D | fastmap-wl.c |
  29          struct ubi_wl_entry *e, *victim = NULL;  /* in find_anchor_wl_entry() */
  32          ubi_rb_for_each_entry(p, e, root, u.rb) {
  33                  if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
  34                          victim = e;
  35                          max_ec = e->ec;
  43                                  struct ubi_wl_entry *e)  /* in return_unused_peb() */
  45          wl_tree_add(e, &ubi->free);
  58          struct ubi_wl_entry *e;  /* in return_unused_pool_pebs() */
  61                  e = ubi->lookuptbl[pool->pebs[i]];
  62                  return_unused_peb(ubi, e);
[all …]
|
/linux-6.1.9/tools/testing/selftests/powerpc/pmu/event_code_tests/ |
D | event_alternatives_tests_p10.c |
  27          struct event *e, events[5];  /* in event_alternatives_tests_p10() */
  47          e = &events[0];
  48          event_init(e, 0x0001e);
  50          e = &events[1];
  51          event_init(e, EventCode_1);
  53          e = &events[2];
  54          event_init(e, EventCode_2);
  56          e = &events[3];
  57          event_init(e, EventCode_3);
  59          e = &events[4];
[all …]
|
/linux-6.1.9/arch/sparc/vdso/ |
D | vma.c |
  68  static void *one_section64(struct vdso_elfinfo64 *e, const char *name,
  75          shdrs = (void *)e->hdr + e->hdr->e_shoff;
  76          snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset;
  77          for (i = 1; i < e->hdr->e_shnum; i++) {
  81                          return (void *)e->hdr + shdrs[i].sh_offset;
  89          struct vdso_elfinfo64 *e = &_e->u.elf64;  /* in find_sections64() */
  91          e->hdr = image->data;
  92          e->dynsym = one_section64(e, ".dynsym", &e->dynsymsize);
  93          e->dynstr = one_section64(e, ".dynstr", NULL);
  95          if (!e->dynsym || !e->dynstr) {
[all …]
|
/linux-6.1.9/net/netfilter/ipset/ |
D | ip_set_list_set.c |
  55          struct set_elem *e;  /* in list_set_ktest() */
  63          list_for_each_entry_rcu(e, &map->members, list) {
  64                  ret = ip_set_test(e->id, skb, par, opt);
  67                  if (ip_set_match_extensions(set, ext, mext, flags, e))
  79          struct set_elem *e;  /* in list_set_kadd() */
  82          list_for_each_entry(e, &map->members, list) {
  84                      ip_set_timeout_expired(ext_timeout(e, set)))
  86                  ret = ip_set_add(e->id, skb, par, opt);
  99          struct set_elem *e;  /* in list_set_kdel() */
 102          list_for_each_entry(e, &map->members, list) {
[all …]
|
D | ip_set_hash_netportnet.c |
 143  hash_netportnet4_init(struct hash_netportnet4_elem *e)
 145          e->cidr[0] = HOST_MASK;
 146          e->cidr[1] = HOST_MASK;
 156          struct hash_netportnet4_elem e = { };  /* in hash_netportnet4_kadt() */
 159          e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
 160          e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
 162                  e.ccmp = (HOST_MASK << (sizeof(e.cidr[0]) * 8)) | HOST_MASK;
 165                                  &e.port, &e.proto))
 168          ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0]);
 169          ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip[1]);
[all …]
|
D | ip_set_hash_netnet.c |
 133  hash_netnet4_init(struct hash_netnet4_elem *e)
 135          e->cidr[0] = HOST_MASK;
 136          e->cidr[1] = HOST_MASK;
 146          struct hash_netnet4_elem e = { };  /* in hash_netnet4_kadt() */
 149          e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
 150          e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
 152                  e.ccmp = (HOST_MASK << (sizeof(e.cidr[0]) * 8)) | HOST_MASK;
 154          ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0]);
 155          ip4addrptr(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.ip[1]);
 156          e.ip[0] &= ip_set_netmask(e.cidr[0]);
[all …]
|
/linux-6.1.9/drivers/edac/ |
D | edac_mc.c |
  56  static struct mem_ctl_info *error_desc_to_mci(struct edac_raw_error_desc *e)
  58          return container_of(e, struct mem_ctl_info, error_desc);
 803  static void edac_inc_ce_error(struct edac_raw_error_desc *e)
 805          int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
 806          struct mem_ctl_info *mci = error_desc_to_mci(e);
 809          mci->ce_mc += e->error_count;
 812                  dimm->ce_count += e->error_count;
 814                  mci->ce_noinfo_count += e->error_count;
 817  static void edac_inc_ue_error(struct edac_raw_error_desc *e)
 819          int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
[all …]
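error_desc_to_mci() is the classic container_of() idiom: given a pointer to an embedded member, recover the enclosing structure by subtracting the member's offset. A simplified standalone sketch (hypothetical struct names; the kernel's container_of() adds type checking omitted here):

#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct error_desc { int error_count; };
struct mci_like {
        int id;
        struct error_desc error_desc;   /* embedded, as in struct mem_ctl_info */
};

static struct mci_like *to_mci(struct error_desc *e)
{
        return container_of(e, struct mci_like, error_desc);
}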
|
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/esw/ |
D | indir_table.c |
 105  mlx5_esw_indir_table_rule_lookup(struct mlx5_esw_indir_table_entry *e,
 110          list_for_each_entry(rule, &e->recirc_rules, list)
 125                                  struct mlx5_esw_indir_table_entry *e)  /* in mlx5_esw_indir_table_rule_get() */
 138          rule = mlx5_esw_indir_table_rule_lookup(e, esw_attr);
 142          if (e->recirc_cnt == MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX)
 228          handle = mlx5_add_flow_rules(e->ft, rule_spec, &flow_act, &dest, 1);
 241          list_add(&rule->list, &e->recirc_rules);
 242          e->recirc_cnt++;
 262                                  struct mlx5_esw_indir_table_entry *e)  /* in mlx5_esw_indir_table_rule_put() */
 268          list_for_each_entry(rule, &e->recirc_rules, list)
[all …]
|
/linux-6.1.9/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_sync.c |
 135          struct amdgpu_sync_entry *e;  /* in amdgpu_sync_add_later() */
 137          hash_for_each_possible(sync->fences, e, node, f->context) {
 138                  if (unlikely(e->fence->context != f->context))
 141                  amdgpu_sync_keep_later(&e->fence, f);
 157          struct amdgpu_sync_entry *e;  /* in amdgpu_sync_fence() */
 165          e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
 166          if (!e)
 169          hash_add(sync->fences, &e->node, f->context);
 170          e->fence = dma_fence_get(f);
 274          struct amdgpu_sync_entry *e;  /* in amdgpu_sync_peek_fence() */
[all …]
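hash_for_each_possible() walks only the bucket that f->context hashes to, so entries that merely collided on the hash must still be filtered by the explicit context check, as amdgpu_sync_add_later() does above. A kernel-style sketch of the same linux/hashtable.h pattern (hypothetical types, for illustration only):

#include <linux/hashtable.h>
#include <linux/types.h>

struct ctx_entry {
        u64 context;            /* lookup key */
        struct hlist_node node; /* hashtable linkage */
};

static DEFINE_HASHTABLE(ctx_table, 7);  /* 2^7 = 128 buckets */

static struct ctx_entry *ctx_lookup(u64 context)
{
        struct ctx_entry *e;

        hash_for_each_possible(ctx_table, e, node, context) {
                if (e->context == context)      /* skip hash collisions */
                        return e;
        }
        return NULL;
}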
|
/linux-6.1.9/arch/arm64/kvm/vgic/ |
D | vgic-irqfd.c |
  18  static int vgic_irqfd_set_irq(struct kvm_kernel_irq_routing_entry *e,
  22          unsigned int spi_id = e->irqchip.pin + VGIC_NR_PRIVATE_IRQS;
  39                              struct kvm_kernel_irq_routing_entry *e,  /* in kvm_set_routing_entry() */
  46                  e->set = vgic_irqfd_set_irq;
  47                  e->irqchip.irqchip = ue->u.irqchip.irqchip;
  48                  e->irqchip.pin = ue->u.irqchip.pin;
  49                  if ((e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS) ||
  50                      (e->irqchip.irqchip >= KVM_NR_IRQCHIPS))
  54                  e->set = kvm_set_msi;
  55                  e->msi.address_lo = ue->u.msi.address_lo;
[all …]
|