/linux-6.1.9/drivers/usb/mtu3/ |
D | mtu3_gadget.c |
    15   __releases(mep->mtu->lock)    in mtu3_req_complete()
    16   __acquires(mep->mtu->lock)    in mtu3_req_complete()
    19   struct mtu3 *mtu = mreq->mtu;    in mtu3_req_complete() local
    26   spin_unlock(&mtu->lock);    in mtu3_req_complete()
    30   usb_gadget_unmap_request(&mtu->g, req, mep->is_in);    in mtu3_req_complete()
    32   dev_dbg(mtu->dev, "%s complete req: %p, sts %d, %d/%d\n",    in mtu3_req_complete()
    36   spin_lock(&mtu->lock);    in mtu3_req_complete()
    46   dev_dbg(mep->mtu->dev, "abort %s's req: sts %d\n", mep->name, status);    in nuke()
    63   struct mtu3 *mtu = mep->mtu;    in mtu3_ep_enable() local
    74   switch (mtu->g.speed) {    in mtu3_ep_enable()
    [all …]
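The mtu3_req_complete() hits above show a common completion pattern: the driver drops its endpoint lock before calling the gadget driver's completion callback and re-takes it afterwards, so the callback can safely resubmit requests through paths that take the same lock. A minimal user-space analogue of that unlock/callback/relock discipline, using pthreads (all names here are illustrative, not taken from the driver):

    /* build: cc req_complete.c -pthread */
    #include <pthread.h>
    #include <stdio.h>

    struct req {
        void (*complete)(struct req *r, int status);  /* user-supplied callback */
        int status;
    };

    static pthread_mutex_t ep_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Call the completion handler with the endpoint lock dropped, so the
     * handler may re-enter code that takes ep_lock (e.g. to queue a new
     * request) without deadlocking. Caller holds ep_lock on entry/exit. */
    static void req_complete_locked(struct req *r, int status)
    {
        r->status = status;
        pthread_mutex_unlock(&ep_lock);
        r->complete(r, status);
        pthread_mutex_lock(&ep_lock);
    }

    static void on_done(struct req *r, int status)
    {
        printf("request %p completed, status %d\n", (void *)r, status);
    }

    int main(void)
    {
        struct req r = { .complete = on_done };

        pthread_mutex_lock(&ep_lock);
        req_complete_locked(&r, 0);
        pthread_mutex_unlock(&ep_lock);
        return 0;
    }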
|
D | mtu3_core.c |
    45   dev_dbg(mep->mtu->dev, "%s fifo:%#x/%#x, start_bit: %d\n",    in ep_fifo_alloc()
    66   dev_dbg(mep->mtu->dev, "%s size:%#x/%#x, start_bit: %d\n",    in ep_fifo_free()
    71   static inline void mtu3_ss_func_set(struct mtu3 *mtu, bool enable)    in mtu3_ss_func_set() argument
    75   mtu3_setbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN);    in mtu3_ss_func_set()
    77   mtu3_clrbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN);    in mtu3_ss_func_set()
    79   dev_dbg(mtu->dev, "USB3_EN = %d\n", !!enable);    in mtu3_ss_func_set()
    83   static inline void mtu3_hs_softconn_set(struct mtu3 *mtu, bool enable)    in mtu3_hs_softconn_set() argument
    86   mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT,    in mtu3_hs_softconn_set()
    89   mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT,    in mtu3_hs_softconn_set()
    92   dev_dbg(mtu->dev, "SOFTCONN = %d\n", !!enable);    in mtu3_hs_softconn_set()
    [all …]
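mtu3_setbits()/mtu3_clrbits() in the hits above are the usual read-modify-write helpers for turning individual register bits on and off. A stand-alone user-space sketch of the same idea on an ordinary 32-bit "register" (the bit value is made up for illustration; the driver operates on ioremapped MMIO via readl()/writel()):

    #include <stdint.h>
    #include <stdio.h>

    /* Read-modify-write helpers on a plain variable standing in for a register. */
    static void setbits(volatile uint32_t *reg, uint32_t bits)
    {
        *reg |= bits;
    }

    static void clrbits(volatile uint32_t *reg, uint32_t bits)
    {
        *reg &= ~bits;
    }

    #define USB3_EN_BIT (1u << 0)   /* illustrative bit position only */

    int main(void)
    {
        volatile uint32_t usb3_config = 0;

        setbits(&usb3_config, USB3_EN_BIT);   /* enable:  USB3_EN = 1 */
        printf("USB3_EN = %d\n", !!(usb3_config & USB3_EN_BIT));
        clrbits(&usb3_config, USB3_EN_BIT);   /* disable: USB3_EN = 0 */
        printf("USB3_EN = %d\n", !!(usb3_config & USB3_EN_BIT));
        return 0;
    }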
|
D | mtu3_gadget_ep0.c |
    18   #define next_ep0_request(mtu) next_request((mtu)->ep0)    argument
    39   static char *decode_ep0_state(struct mtu3 *mtu)    in decode_ep0_state() argument
    41   switch (mtu->ep0_state) {    in decode_ep0_state()
    57   static void ep0_req_giveback(struct mtu3 *mtu, struct usb_request *req)    in ep0_req_giveback() argument
    59   mtu3_req_complete(mtu->ep0, req, 0);    in ep0_req_giveback()
    63   forward_to_driver(struct mtu3 *mtu, const struct usb_ctrlrequest *setup)    in forward_to_driver() argument
    64   __releases(mtu->lock)    in forward_to_driver()
    65   __acquires(mtu->lock)    in forward_to_driver()
    69   if (!mtu->gadget_driver || !mtu->async_callbacks)    in forward_to_driver()
    72   spin_unlock(&mtu->lock);    in forward_to_driver()
    [all …]
|
D | mtu3_qmu.c |
    38   #define GPD_RX_BUF_LEN(mtu, x) \    argument
    41   ((mtu)->gen2cp) ? GPD_RX_BUF_LEN_EL(x_) : GPD_RX_BUF_LEN_OG(x_); \
    46   #define GPD_DATA_LEN(mtu, x) \    argument
    49   ((mtu)->gen2cp) ? GPD_DATA_LEN_EL(x_) : GPD_DATA_LEN_OG(x_); \
    57   #define GPD_EXT_NGP(mtu, x) \    argument
    60   ((mtu)->gen2cp) ? GPD_EXT_NGP_EL(x_) : GPD_EXT_NGP_OG(x_); \
    63   #define GPD_EXT_BUF(mtu, x) \    argument
    66   ((mtu)->gen2cp) ? GPD_EXT_BUF_EL(x_) : GPD_EXT_BUF_OG(x_); \
    171  gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);    in mtu3_gpd_ring_alloc()
    184  dma_pool_free(mep->mtu->qmu_gpd_pool,    in mtu3_gpd_ring_free()
    [all …]
|
D | mtu3_debugfs.c |
    81   struct mtu3 *mtu = sf->private;    in mtu3_link_state_show() local
    82   void __iomem *mbase = mtu->mac_base;    in mtu3_link_state_show()
    93   struct mtu3 *mtu = sf->private;    in mtu3_ep_used_show() local
    99   spin_lock_irqsave(&mtu->lock, flags);    in mtu3_ep_used_show()
    101  for (i = 0; i < mtu->num_eps; i++) {    in mtu3_ep_used_show()
    102  mep = mtu->in_eps + i;    in mtu3_ep_used_show()
    108  mep = mtu->out_eps + i;    in mtu3_ep_used_show()
    116  spin_unlock_irqrestore(&mtu->lock, flags);    in mtu3_ep_used_show()
    124  static void mtu3_debugfs_regset(struct mtu3 *mtu, void __iomem *base,    in mtu3_debugfs_regset() argument
    131  mregs = devm_kzalloc(mtu->dev, sizeof(*mregs), GFP_KERNEL);    in mtu3_debugfs_regset()
    [all …]
|
D | mtu3.h |
    276  struct mtu3 *mtu;    member
    299  struct mtu3 *mtu;    member
    422  int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
    424  void mtu3_deconfig_ep(struct mtu3 *mtu, struct mtu3_ep *mep);
    426  void mtu3_start(struct mtu3 *mtu);
    427  void mtu3_stop(struct mtu3 *mtu);
    428  void mtu3_dev_on_off(struct mtu3 *mtu, int is_on);
    430  int mtu3_gadget_setup(struct mtu3 *mtu);
    431  void mtu3_gadget_cleanup(struct mtu3 *mtu);
    432  void mtu3_gadget_reset(struct mtu3 *mtu);
    [all …]
|
/linux-6.1.9/drivers/clocksource/ |
D | sh_mtu2.c |
    33   struct sh_mtu2_device *mtu;    member
    161  return ioread8(ch->mtu->mapbase + 0x280);    in sh_mtu2_read()
    177  return iowrite8(value, ch->mtu->mapbase + 0x280);    in sh_mtu2_write()
    192  raw_spin_lock_irqsave(&ch->mtu->lock, flags);    in sh_mtu2_start_stop_ch()
    201  raw_spin_unlock_irqrestore(&ch->mtu->lock, flags);    in sh_mtu2_start_stop_ch()
    210  pm_runtime_get_sync(&ch->mtu->pdev->dev);    in sh_mtu2_enable()
    211  dev_pm_syscore_device(&ch->mtu->pdev->dev, true);    in sh_mtu2_enable()
    214  ret = clk_enable(ch->mtu->clk);    in sh_mtu2_enable()
    216  dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n",    in sh_mtu2_enable()
    224  rate = clk_get_rate(ch->mtu->clk) / 64;    in sh_mtu2_enable()
    [all …]
|
/linux-6.1.9/tools/testing/selftests/net/ |
D | pmtu.sh |
    956   mtu() {    function
    959   mtu="${3}"
    961   ${ns_cmd} ip link set dev ${dev} mtu ${mtu}
    1043  mtu "${ns_a}" veth_A-R1 2000
    1044  mtu "${ns_r1}" veth_R1-A 2000
    1045  mtu "${ns_r1}" veth_R1-B 1400
    1046  mtu "${ns_b}" veth_B-R1 1400
    1048  mtu "${ns_a}" veth_A-R2 2000
    1049  mtu "${ns_r2}" veth_R2-A 2000
    1050  mtu "${ns_r2}" veth_R2-B 1500
    [all …]
|
D | fib_nexthop_multiprefix.sh |
    156  local mtu=$2
    158  run_cmd ip -netns h${hostid} li set eth0 mtu ${mtu}
    159  run_cmd ip -netns r1 li set eth${hostid} mtu ${mtu}
    168  local mtu=$2
    183  echo " cache .* mtu ${mtu}"
    188  grep -q "cache .* mtu ${mtu}"
    191  log_test $rc 0 "IPv4: host 0 to host ${i}, mtu ${mtu}"
    197  local mtu=$2
    212  echo " ${dst} from :: via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
    217  grep -q "${dst} from :: via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
    [all …]
|
/linux-6.1.9/tools/testing/selftests/net/forwarding/ |
D | ipip_lib.sh |
    320  local mtu=$1
    322  ip link set mtu $mtu dev $h1
    323  ip link set mtu $mtu dev $ol1
    324  ip link set mtu $mtu dev g1a
    325  ip link set mtu $mtu dev $ul1
    326  ip link set mtu $mtu dev $ul1.111
    327  ip link set mtu $mtu dev $h2
    328  ip link set mtu $mtu dev $ol2
    329  ip link set mtu $mtu dev g2a
    330  ip link set mtu $mtu dev $ul2
    [all …]
|
D | ip6gre_lib.sh |
    411  local mtu=$1
    413  ip link set mtu $mtu dev $h1
    414  ip link set mtu $mtu dev $ol1
    415  ip link set mtu $mtu dev g1a
    416  ip link set mtu $mtu dev $ul1
    417  ip link set mtu $mtu dev $ul1.111
    418  ip link set mtu $mtu dev $h2
    419  ip link set mtu $mtu dev $ol2
    420  ip link set mtu $mtu dev g2a
    421  ip link set mtu $mtu dev $ul2
    [all …]
|
/linux-6.1.9/tools/testing/selftests/bpf/prog_tests/ |
D | check_mtu.c |
    104  static void test_check_mtu_xdp(__u32 mtu, __u32 ifindex)    in test_check_mtu_xdp() argument
    114  skel->rodata->GLOBAL_USER_MTU = mtu;    in test_check_mtu_xdp()
    121  test_check_mtu_run_xdp(skel, skel->progs.xdp_use_helper, mtu);    in test_check_mtu_xdp()
    122  test_check_mtu_run_xdp(skel, skel->progs.xdp_exceed_mtu, mtu);    in test_check_mtu_xdp()
    123  test_check_mtu_run_xdp(skel, skel->progs.xdp_minus_delta, mtu);    in test_check_mtu_xdp()
    124  test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len, mtu);    in test_check_mtu_xdp()
    125  test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len_exceed, mtu);    in test_check_mtu_xdp()
    157  static void test_check_mtu_tc(__u32 mtu, __u32 ifindex)    in test_check_mtu_tc() argument
    167  skel->rodata->GLOBAL_USER_MTU = mtu;    in test_check_mtu_tc()
    174  test_check_mtu_run_tc(skel, skel->progs.tc_use_helper, mtu);    in test_check_mtu_tc()
    [all …]
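The selftest above loads small XDP and TC programs that call the bpf_check_mtu() helper and compares their verdicts against a user-supplied MTU. Below is a minimal XDP sketch of such a helper call, assuming the signature documented in bpf-helpers(7), where ifindex 0 means "the device the packet arrived on"; the program name is illustrative and not taken from the selftest, and helper details should be verified against your kernel version:

    // SPDX-License-Identifier: GPL-2.0
    /* Sketch only: drop packets that would not fit the device MTU.
     * Build with clang -O2 -target bpf -c xdp_mtu_guard.c */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_mtu_guard(struct xdp_md *ctx)
    {
        __u32 mtu_len = 0;   /* 0 on input: check the packet's own length */

        /* ifindex 0: check against the device the packet arrived on;
         * a non-zero return means MTU exceeded or invalid argument. */
        if (bpf_check_mtu(ctx, 0, &mtu_len, 0, 0))
            return XDP_DROP;

        return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";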
|
/linux-6.1.9/drivers/infiniband/sw/rxe/ |
D | rxe_param.h |
    14   static inline enum ib_mtu rxe_mtu_int_to_enum(int mtu)    in rxe_mtu_int_to_enum() argument
    16   if (mtu < 256)    in rxe_mtu_int_to_enum()
    18   else if (mtu < 512)    in rxe_mtu_int_to_enum()
    20   else if (mtu < 1024)    in rxe_mtu_int_to_enum()
    22   else if (mtu < 2048)    in rxe_mtu_int_to_enum()
    24   else if (mtu < 4096)    in rxe_mtu_int_to_enum()
    31   static inline enum ib_mtu eth_mtu_int_to_enum(int mtu)    in eth_mtu_int_to_enum() argument
    33   mtu -= RXE_MAX_HDR_LENGTH;    in eth_mtu_int_to_enum()
    35   return rxe_mtu_int_to_enum(mtu);    in eth_mtu_int_to_enum()
|
D | rxe.c |
    154  enum ib_mtu mtu;    in rxe_set_mtu() local
    156  mtu = eth_mtu_int_to_enum(ndev_mtu);    in rxe_set_mtu()
    159  mtu = mtu ? min_t(enum ib_mtu, mtu, IB_MTU_4096) : IB_MTU_256;    in rxe_set_mtu()
    161  port->attr.active_mtu = mtu;    in rxe_set_mtu()
    162  port->mtu_cap = ib_mtu_enum_to_int(mtu);    in rxe_set_mtu()
    168  int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name)    in rxe_add() argument
    171  rxe_set_mtu(rxe, mtu);    in rxe_add()
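The rxe_param.h and rxe.c hits above convert a netdev MTU into an IB MTU bucket: subtract the rxe header overhead, pick the largest power-of-two IB MTU that still fits (the thresholds 256/512/1024/2048/4096 are visible in the listing), then cap at IB_MTU_4096 and fall back to the smallest bucket when nothing fits. A stand-alone sketch of that bucketing; the enum and the overhead constant are illustrative placeholders, not the kernel's definitions:

    #include <stdio.h>

    /* IB MTU buckets, in the spirit of enum ib_mtu. */
    enum ib_mtu_sketch { MTU_NONE = 0, MTU_256, MTU_512, MTU_1024, MTU_2048, MTU_4096 };

    #define HDR_OVERHEAD 128   /* placeholder for the driver's max header length */

    /* Largest bucket that still fits within mtu; MTU_NONE if even 256 doesn't. */
    static enum ib_mtu_sketch mtu_int_to_enum(int mtu)
    {
        if (mtu < 256)
            return MTU_NONE;
        else if (mtu < 512)
            return MTU_256;
        else if (mtu < 1024)
            return MTU_512;
        else if (mtu < 2048)
            return MTU_1024;
        else if (mtu < 4096)
            return MTU_2048;
        else
            return MTU_4096;
    }

    int main(void)
    {
        int ndev_mtu = 1500;
        enum ib_mtu_sketch m = mtu_int_to_enum(ndev_mtu - HDR_OVERHEAD);

        /* Fall back to the smallest bucket rather than "none", as rxe_set_mtu() does. */
        if (m == MTU_NONE)
            m = MTU_256;
        printf("netdev mtu %d -> IB MTU bucket %d\n", ndev_mtu, m);  /* bucket 3 = 1024 */
        return 0;
    }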
|
/linux-6.1.9/net/ipv6/ |
D | xfrm6_output.c |
    19   void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)    in xfrm6_local_rxpmtu() argument
    27   ipv6_local_rxpmtu(sk, &fl6, mtu);    in xfrm6_local_rxpmtu()
    30   void xfrm6_local_error(struct sk_buff *skb, u32 mtu)    in xfrm6_local_error() argument
    40   ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);    in xfrm6_local_error()
    65   unsigned int mtu;    in __xfrm6_output() local
    79   mtu = ip6_skb_dst_mtu(skb);    in __xfrm6_output()
    81   mtu = dst_mtu(skb_dst(skb));    in __xfrm6_output()
    83   toobig = skb->len > mtu && !skb_is_gso(skb);    in __xfrm6_output()
    86   xfrm6_local_rxpmtu(skb, mtu);    in __xfrm6_output()
    93   xfrm_local_error(skb, mtu);    in __xfrm6_output()
|
D | ip6_output.c |
    141  struct sk_buff *skb, unsigned int mtu)    in ip6_finish_output_gso_slowpath_drop() argument
    174  unsigned int mtu;    in __ip6_finish_output() local
    184  mtu = ip6_skb_dst_mtu(skb);    in __ip6_finish_output()
    187  !skb_gso_validate_network_len(skb, mtu))    in __ip6_finish_output()
    188  return ip6_finish_output_gso_slowpath_drop(net, sk, skb, mtu);    in __ip6_finish_output()
    190  if ((skb->len > mtu && !skb_is_gso(skb)) ||    in __ip6_finish_output()
    264  u32 mtu;    in ip6_xmit() local
    329  mtu = dst_mtu(dst);    in ip6_xmit()
    330  if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {    in ip6_xmit()
    352  ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);    in ip6_xmit()
    [all …]
|
D | netfilter.c |
    133  unsigned int mtu, hlen;    in br_ip6_fragment() local
    143  mtu = skb->dev->mtu;    in br_ip6_fragment()
    144  if (frag_max_size > mtu ||    in br_ip6_fragment()
    148  mtu = frag_max_size;    in br_ip6_fragment()
    149  if (mtu < hlen + sizeof(struct frag_hdr) + 8)    in br_ip6_fragment()
    151  mtu -= hlen + sizeof(struct frag_hdr);    in br_ip6_fragment()
    166  if (first_len - hlen > mtu ||    in br_ip6_fragment()
    174  if (frag2->len > mtu ||    in br_ip6_fragment()
    215  ip6_frag_init(skb, hlen, mtu, skb->dev->needed_tailroom,    in br_ip6_fragment()
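br_ip6_fragment() above derives its per-fragment payload budget by subtracting the IPv6 header length plus the fragment extension header from the device MTU (or frag_max_size), and bails out if fewer than 8 payload bytes remain. A small stand-alone calculation of how many fragments a message needs under that budget; the sketch uses only the 40-byte basic IPv6 header, whereas the kernel's hlen covers the whole header chain, and the payload is rounded down to a multiple of 8 as IPv6 fragmentation requires:

    #include <stdio.h>

    #define IPV6_HLEN 40   /* basic IPv6 header (the kernel includes extension headers too) */
    #define FRAG_HLEN 8    /* IPv6 fragment extension header */

    /* Number of fragments needed for 'payload' bytes of upper-layer data at
     * 'mtu', or 0 if the MTU leaves no room to fragment at all. */
    static unsigned int ip6_frag_count(unsigned int mtu, unsigned int payload)
    {
        unsigned int budget;

        if (mtu < IPV6_HLEN + FRAG_HLEN + 8)
            return 0;                              /* budget below 8 bytes: give up */

        budget = (mtu - IPV6_HLEN - FRAG_HLEN) & ~7u;  /* multiple of 8 */
        return (payload + budget - 1) / budget;        /* ceiling division */
    }

    int main(void)
    {
        printf("1400-byte MTU, 4000-byte payload -> %u fragments\n",
               ip6_frag_count(1400, 4000));        /* budget 1352 -> 3 fragments */
        return 0;
    }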
|
/linux-6.1.9/include/net/ |
D | ip6_route.h |
    185  void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif,
    187  void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
    203  void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
    271  unsigned int mtu;    in ip6_skb_dst_mtu() local
    274  mtu = READ_ONCE(dst->dev->mtu);    in ip6_skb_dst_mtu()
    275  mtu -= lwtunnel_headroom(dst->lwtstate, mtu);    in ip6_skb_dst_mtu()
    277  mtu = dst_mtu(dst);    in ip6_skb_dst_mtu()
    279  return mtu;    in ip6_skb_dst_mtu()
    323  unsigned int mtu;    in ip6_dst_mtu_maybe_forward() local
    326  mtu = dst_metric_raw(dst, RTAX_MTU);    in ip6_dst_mtu_maybe_forward()
    [all …]
|
/linux-6.1.9/net/rxrpc/ |
D | peer_event.c |
    44   *info = ntohs(icmp->un.frag.mtu);    in rxrpc_lookup_peer_icmp_rcu()
    349  static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu)    in rxrpc_adjust_mtu() argument
    351  _net("Rx ICMP Fragmentation Needed (%d)", mtu);    in rxrpc_adjust_mtu()
    354  if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {    in rxrpc_adjust_mtu()
    355  peer->if_mtu = mtu;    in rxrpc_adjust_mtu()
    356  _net("I/F MTU %u", mtu);    in rxrpc_adjust_mtu()
    359  if (mtu == 0) {    in rxrpc_adjust_mtu()
    361  mtu = peer->if_mtu;    in rxrpc_adjust_mtu()
    362  if (mtu > 1500) {    in rxrpc_adjust_mtu()
    363  mtu >>= 1;    in rxrpc_adjust_mtu()
    [all …]
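rxrpc_adjust_mtu() above reacts to an ICMP "Fragmentation Needed" report: a non-zero reported MTU lowers the cached interface MTU when it is still at the optimistic 65535 default, while a zero report (no size supplied) makes the code estimate one, halving the current value when it is above 1500. A hedged user-space sketch of just that visible logic; the unlisted branches of the real function are not reproduced, and where the estimate is stored afterwards is simplified:

    #include <stdio.h>

    struct peer {
        unsigned int if_mtu;   /* current interface MTU estimate */
    };

    static void adjust_mtu(struct peer *peer, unsigned int reported)
    {
        /* A concrete report below our optimistic default wins. */
        if (reported > 0 && peer->if_mtu == 65535 && reported < peer->if_mtu)
            peer->if_mtu = reported;

        /* No size in the ICMP message: make a conservative guess by
         * halving anything larger than a typical Ethernet MTU.  The
         * sketch simply stores the estimate back into if_mtu. */
        if (reported == 0) {
            unsigned int mtu = peer->if_mtu;

            if (mtu > 1500)
                mtu >>= 1;
            peer->if_mtu = mtu;
        }
    }

    int main(void)
    {
        struct peer a = { .if_mtu = 65535 };
        struct peer b = { .if_mtu = 65535 };

        adjust_mtu(&a, 1400);
        printf("MTU after report of 1400: %u\n", a.if_mtu);  /* 1400 */
        adjust_mtu(&b, 0);
        printf("MTU after empty report:   %u\n", b.if_mtu);  /* 32767 */
        return 0;
    }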
|
/linux-6.1.9/net/ipv4/ |
D | ip_forward.c |
    43   static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)    in ip_exceeds_mtu() argument
    45   if (skb->len <= mtu)    in ip_exceeds_mtu()
    52   if (unlikely(IPCB(skb)->frag_max_size > mtu))    in ip_exceeds_mtu()
    58   if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))    in ip_exceeds_mtu()
    88   u32 mtu;    in ip_forward() local
    135  mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);    in ip_forward()
    136  if (ip_exceeds_mtu(skb, mtu)) {    in ip_forward()
    139  htonl(mtu));    in ip_forward()
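ip_exceeds_mtu() above answers "does forwarding this packet violate the egress MTU?": packets that fit are fine, packets without the DF bit can be fragmented later, an over-MTU frag_max_size is a violation, and GSO packets are judged by whether their resulting segments fit rather than by the total length. A simplified stand-alone version of that decision (the GSO validation is reduced to a boolean stand-in for skb_gso_validate_network_len()):

    #include <stdbool.h>
    #include <stdio.h>

    struct pkt {
        unsigned int len;              /* total length */
        unsigned int frag_max_size;    /* largest received fragment, 0 if unknown */
        bool df_set;                   /* IP "don't fragment" bit */
        bool gso;                      /* will be segmented before transmission */
        bool gso_segs_fit_mtu;         /* stand-in for skb_gso_validate_network_len() */
    };

    static bool exceeds_mtu(const struct pkt *p, unsigned int mtu)
    {
        if (p->len <= mtu)
            return false;
        if (!p->df_set)
            return false;              /* may be fragmented on the way out */
        if (p->frag_max_size > mtu)
            return true;               /* original fragment already too big */
        if (p->gso && p->gso_segs_fit_mtu)
            return false;              /* resulting segments will fit */
        return true;
    }

    int main(void)
    {
        struct pkt p = { .len = 1600, .df_set = true };

        printf("exceeds 1500? %d\n", exceeds_mtu(&p, 1500));             /* 1 */
        p.df_set = false;
        printf("exceeds 1500 without DF? %d\n", exceeds_mtu(&p, 1500));  /* 0 */
        return 0;
    }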
|
D | ip_tunnel.c |
    285  int mtu = ETH_DATA_LEN;    in ip_tunnel_bind_dev() local
    316  mtu = min(tdev->mtu, IP_MAX_MTU);    in ip_tunnel_bind_dev()
    320  mtu -= t_hlen + (dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0);    in ip_tunnel_bind_dev()
    322  if (mtu < IPV4_MIN_MTU)    in ip_tunnel_bind_dev()
    323  mtu = IPV4_MIN_MTU;    in ip_tunnel_bind_dev()
    325  return mtu;    in ip_tunnel_bind_dev()
    335  int mtu;    in ip_tunnel_create() local
    342  mtu = ip_tunnel_bind_dev(dev);    in ip_tunnel_create()
    343  err = dev_set_mtu(dev, mtu);    in ip_tunnel_create()
    491  int mtu;    in tnl_update_pmtu() local
    [all …]
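ip_tunnel_bind_dev() above computes the tunnel device's MTU from the underlay: start from the lower device's MTU (capped at IP_MAX_MTU), subtract the encapsulation header length (plus the Ethernet header for ether-type tunnels), and never go below IPV4_MIN_MTU. A stand-alone version of that arithmetic, using the standard IPv4 limits of 68 bytes minimum and 0xFFFF maximum:

    #include <stdio.h>

    #define IPV4_MIN_MTU 68       /* RFC 791 minimum */
    #define IP_MAX_MTU   0xFFFF   /* maximum IPv4 total length */

    /* MTU the tunnel netdev should advertise, given the underlay MTU and the
     * bytes of encapsulation header added to every packet. */
    static int tunnel_bind_mtu(int underlay_mtu, int t_hlen)
    {
        int mtu = underlay_mtu < IP_MAX_MTU ? underlay_mtu : IP_MAX_MTU;

        mtu -= t_hlen;
        if (mtu < IPV4_MIN_MTU)
            mtu = IPV4_MIN_MTU;
        return mtu;
    }

    int main(void)
    {
        /* e.g. IPIP over a 1500-byte Ethernet underlay: 20 bytes of outer IPv4 */
        printf("tunnel MTU: %d\n", tunnel_bind_mtu(1500, 20));  /* 1480 */
        return 0;
    }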
|
/linux-6.1.9/net/sched/ |
D | sch_teql.c |
    193  dev->mtu < m->dev->mtu)    in teql_qdisc_init()
    202  if (dev->mtu < m->dev->mtu)    in teql_qdisc_init()
    203  m->dev->mtu = dev->mtu;    in teql_qdisc_init()
    210  m->dev->mtu = dev->mtu;    in teql_qdisc_init()
    359  int mtu = 0xFFFE;    in teql_master_open() local
    374  if (slave->mtu < mtu)    in teql_master_open()
    375  mtu = slave->mtu;    in teql_master_open()
    391  m->dev->mtu = mtu;    in teql_master_open()
    422  if (new_mtu > qdisc_dev(q)->mtu)    in teql_master_mtu()
    427  dev->mtu = new_mtu;    in teql_master_mtu()
    [all …]
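The sch_teql hits above keep the master device's MTU at the minimum of its slaves' MTUs, starting from an optimistic 0xFFFE and refusing to raise it above any slave. A minimal sketch of that reduction:

    #include <stdio.h>

    /* MTU a teql-style master may advertise: no larger than any slave's. */
    static unsigned int master_mtu(const unsigned int *slave_mtu, int n_slaves)
    {
        unsigned int mtu = 0xFFFE;   /* same optimistic start as teql_master_open() */
        int i;

        for (i = 0; i < n_slaves; i++)
            if (slave_mtu[i] < mtu)
                mtu = slave_mtu[i];
        return mtu;
    }

    int main(void)
    {
        unsigned int slaves[] = { 1500, 9000, 1492 };

        printf("master MTU = %u\n", master_mtu(slaves, 3));  /* 1492 */
        return 0;
    }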
|
/linux-6.1.9/drivers/infiniband/hw/irdma/ |
D | main.c |
    55   static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)    in irdma_log_invalid_mtu() argument
    57   if (mtu < IRDMA_MIN_MTU_IPV4)    in irdma_log_invalid_mtu()
    58   …rn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n", mtu);    in irdma_log_invalid_mtu()
    59   else if (mtu < IRDMA_MIN_MTU_IPV6)    in irdma_log_invalid_mtu()
    60   …(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\n", mtu);    in irdma_log_invalid_mtu()
    94   ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu);    in irdma_iidc_event_handler()
    95   if (iwdev->vsi.mtu != iwdev->netdev->mtu) {    in irdma_iidc_event_handler()
    96   l2params.mtu = iwdev->netdev->mtu;    in irdma_iidc_event_handler()
    98   irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);    in irdma_iidc_event_handler()
    288  l2params.mtu = iwdev->netdev->mtu;    in irdma_probe()
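irdma_log_invalid_mtu() above warns when the netdev MTU drops below what RDMA traffic needs: 576 bytes for IPv4 and 1280 bytes for IPv6, the figures quoted in the log strings. A tiny validation helper in the same spirit; the thresholds are taken from those messages, the constant names mirror the driver's, and the function itself is illustrative:

    #include <stdio.h>

    #define IRDMA_MIN_MTU_IPV4 576    /* minimum usable MTU for IPv4 RDMA */
    #define IRDMA_MIN_MTU_IPV6 1280   /* minimum usable MTU for IPv6 RDMA */

    static void log_invalid_mtu(unsigned int mtu)
    {
        if (mtu < IRDMA_MIN_MTU_IPV4)
            printf("MTU setting [%u] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n", mtu);
        else if (mtu < IRDMA_MIN_MTU_IPV6)
            printf("MTU setting [%u] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\n", mtu);
    }

    int main(void)
    {
        log_invalid_mtu(500);    /* too low for both families */
        log_invalid_mtu(1000);   /* too low for IPv6 only */
        log_invalid_mtu(1500);   /* silent: fine for both */
        return 0;
    }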
|
/linux-6.1.9/net/mctp/test/ |
D | route-test.c |
    42   unsigned int mtu)    in mctp_test_create_route() argument
    52   rt->rt.mtu = mtu;    in mctp_test_create_route()
    124  unsigned int mtu;    member
    132  int rc, i, n, mtu, msgsize;    in mctp_test_fragment() local
    139  mtu = params->mtu;    in mctp_test_fragment()
    150  rt = mctp_test_create_route(&init_net, NULL, 10, mtu);    in mctp_test_fragment()
    153  rc = mctp_do_fragment_route(&rt->rt, skb, mtu, MCTP_TAG_OWNER);    in mctp_test_fragment()
    200  KUNIT_EXPECT_EQ(test, skb2->len, mtu);    in mctp_test_fragment()
    202  KUNIT_EXPECT_LE(test, skb2->len, mtu);    in mctp_test_fragment()
    211  {.mtu = 68, .msgsize = 63, .n_frags = 1},
    [all …]
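The KUnit cases above check that mctp_do_fragment_route() produces the expected number of fragments for a given route MTU: every fragment but the last is exactly mtu bytes long, the last at most mtu. The expected count is simple arithmetic once the per-fragment MCTP header is accounted for; a sketch assuming a 4-byte MCTP header (an assumption to verify against struct mctp_hdr / the MCTP spec):

    #include <stdio.h>

    #define MCTP_HDR_LEN 4   /* assumed per-fragment MCTP header size */

    /* Fragments needed to carry msgsize bytes over a route with the given MTU. */
    static unsigned int mctp_frag_count(unsigned int mtu, unsigned int msgsize)
    {
        unsigned int payload = mtu - MCTP_HDR_LEN;    /* message bytes per fragment */

        return (msgsize + payload - 1) / payload;     /* ceiling division */
    }

    int main(void)
    {
        /* Matches the first test parameter set: {.mtu = 68, .msgsize = 63, .n_frags = 1} */
        printf("mtu 68, msgsize 63 -> %u fragment(s)\n", mctp_frag_count(68, 63));
        return 0;
    }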
|
/linux-6.1.9/include/rdma/ |
D | ib_addr.h |
    174  static inline enum ib_mtu iboe_get_mtu(int mtu)    in iboe_get_mtu() argument
    179  mtu = mtu - (IB_GRH_BYTES + IB_UDP_BYTES + IB_BTH_BYTES +    in iboe_get_mtu()
    183  if (mtu >= ib_mtu_enum_to_int(IB_MTU_4096))    in iboe_get_mtu()
    185  else if (mtu >= ib_mtu_enum_to_int(IB_MTU_2048))    in iboe_get_mtu()
    187  else if (mtu >= ib_mtu_enum_to_int(IB_MTU_1024))    in iboe_get_mtu()
    189  else if (mtu >= ib_mtu_enum_to_int(IB_MTU_512))    in iboe_get_mtu()
    191  else if (mtu >= ib_mtu_enum_to_int(IB_MTU_256))    in iboe_get_mtu()
|