Searched refs:nn (Results 1 – 25 of 116) sorted by relevance

/linux-6.1.9/drivers/net/ethernet/netronome/nfp/
nfp_net_common.c
82 static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update) in nfp_net_reconfig_start() argument
84 nn_writel(nn, NFP_NET_CFG_UPDATE, update); in nfp_net_reconfig_start()
86 nn_pci_flush(nn); in nfp_net_reconfig_start()
87 nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1); in nfp_net_reconfig_start()
88 nn->reconfig_in_progress_update = update; in nfp_net_reconfig_start()
92 static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update) in nfp_net_reconfig_start_async() argument
94 update |= nn->reconfig_posted; in nfp_net_reconfig_start_async()
95 nn->reconfig_posted = 0; in nfp_net_reconfig_start_async()
97 nfp_net_reconfig_start(nn, update); in nfp_net_reconfig_start_async()
99 nn->reconfig_timer_active = true; in nfp_net_reconfig_start_async()
[all …]
ccm_mbox.c
54 static u32 nfp_ccm_mbox_max_msg(struct nfp_net *nn) in nfp_ccm_mbox_max_msg() argument
56 return round_down(nn->tlv_caps.mbox_len, 4) - in nfp_ccm_mbox_max_msg()
116 static bool nfp_ccm_mbox_is_first(struct nfp_net *nn, struct sk_buff *skb) in nfp_ccm_mbox_is_first() argument
118 return skb_queue_is_first(&nn->mbox_cmsg.queue, skb); in nfp_ccm_mbox_is_first()
121 static bool nfp_ccm_mbox_should_run(struct nfp_net *nn, struct sk_buff *skb) in nfp_ccm_mbox_should_run() argument
128 static void nfp_ccm_mbox_mark_next_runner(struct nfp_net *nn) in nfp_ccm_mbox_mark_next_runner() argument
133 skb = skb_peek(&nn->mbox_cmsg.queue); in nfp_ccm_mbox_mark_next_runner()
140 queue_work(nn->mbox_cmsg.workq, &nn->mbox_cmsg.runq_work); in nfp_ccm_mbox_mark_next_runner()
144 nfp_ccm_mbox_write_tlv(struct nfp_net *nn, u32 off, u32 type, u32 len) in nfp_ccm_mbox_write_tlv() argument
146 nn_writel(nn, off, in nfp_ccm_mbox_write_tlv()
[all …]
nfp_netvf_main.c
29 struct nfp_net *nn; member
61 static void nfp_netvf_get_mac_addr(struct nfp_net *nn) in nfp_netvf_get_mac_addr() argument
65 put_unaligned_be32(nn_readl(nn, NFP_NET_CFG_MACADDR + 0), &mac_addr[0]); in nfp_netvf_get_mac_addr()
66 put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]); in nfp_netvf_get_mac_addr()
69 eth_hw_addr_random(nn->dp.netdev); in nfp_netvf_get_mac_addr()
73 eth_hw_addr_set(nn->dp.netdev, mac_addr); in nfp_netvf_get_mac_addr()
74 ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr); in nfp_netvf_get_mac_addr()
89 struct nfp_net *nn; in nfp_netvf_pci_probe() local
191 nn = nfp_net_alloc(pdev, dev_info, ctrl_bar, true, in nfp_netvf_pci_probe()
193 if (IS_ERR(nn)) { in nfp_netvf_pci_probe()
[all …]
nfp_net_ethtool.c
220 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_get_drvinfo() local
223 nn->fw_ver.extend, nn->fw_ver.class, in nfp_net_get_drvinfo()
224 nn->fw_ver.major, nn->fw_ver.minor); in nfp_net_get_drvinfo()
225 strscpy(drvinfo->bus_info, pci_name(nn->pdev), in nfp_net_get_drvinfo()
228 nfp_get_drvinfo(nn->app, nn->pdev, vnic_version, drvinfo); in nfp_net_get_drvinfo()
309 struct nfp_net *nn; in nfp_net_get_link_ksettings() local
347 nn = netdev_priv(netdev); in nfp_net_get_link_ksettings()
349 sts = nn_readw(nn, NFP_NET_CFG_STS); in nfp_net_get_link_ksettings()
428 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_get_ringparam() local
429 u32 qc_max = nn->dev_info->max_qc_size; in nfp_net_get_ringparam()
[all …]
nfp_net_main.c
80 static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn) in nfp_net_pf_free_vnic() argument
82 if (nfp_net_is_data_vnic(nn)) in nfp_net_pf_free_vnic()
83 nfp_app_vnic_free(pf->app, nn); in nfp_net_pf_free_vnic()
84 nfp_port_free(nn->port); in nfp_net_pf_free_vnic()
85 list_del(&nn->vnic_list); in nfp_net_pf_free_vnic()
87 nfp_net_free(nn); in nfp_net_pf_free_vnic()
92 struct nfp_net *nn, *next; in nfp_net_pf_free_vnics() local
94 list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) in nfp_net_pf_free_vnics()
95 if (nfp_net_is_data_vnic(nn)) in nfp_net_pf_free_vnics()
96 nfp_net_pf_free_vnic(pf, nn); in nfp_net_pf_free_vnics()
[all …]
nfp_net.h
28 #define nn_pr(nn, lvl, fmt, args...) \ argument
30 struct nfp_net *__nn = (nn); \
38 #define nn_err(nn, fmt, args...) nn_pr(nn, KERN_ERR, fmt, ## args) argument
39 #define nn_warn(nn, fmt, args...) nn_pr(nn, KERN_WARNING, fmt, ## args) argument
40 #define nn_info(nn, fmt, args...) nn_pr(nn, KERN_INFO, fmt, ## args) argument
41 #define nn_dbg(nn, fmt, args...) nn_pr(nn, KERN_DEBUG, fmt, ## args) argument
711 static inline u16 nn_readb(struct nfp_net *nn, int off) in nn_readb() argument
713 return readb(nn->dp.ctrl_bar + off); in nn_readb()
716 static inline void nn_writeb(struct nfp_net *nn, int off, u8 val) in nn_writeb() argument
718 writeb(val, nn->dp.ctrl_bar + off); in nn_writeb()
[all …]
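
Note: the nfp_net.h hits above show both the driver's per-vNIC log macros (nn_err/nn_warn/nn_info/nn_dbg) and its control-BAR accessors (nn_readb/nn_writeb here; nn_readw/nn_readl/nn_writel appear in other hits on this page). A minimal sketch of how the two are typically combined, assuming only what those hits show; the helper name is hypothetical:

/* Illustrative helper, not from the kernel tree: read a config word
 * through a BAR accessor and report it with a per-vNIC log macro. */
static void example_check_sts(struct nfp_net *nn)
{
	u16 sts = nn_readw(nn, NFP_NET_CFG_STS);

	if (!sts)
		nn_warn(nn, "status register reads as zero\n");
}
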
nfp_app_nic.c
12 struct nfp_net *nn, unsigned int id) in nfp_app_nic_vnic_init_phy_port() argument
19 nn->port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, nn->dp.netdev); in nfp_app_nic_vnic_init_phy_port()
20 if (IS_ERR(nn->port)) in nfp_app_nic_vnic_init_phy_port()
21 return PTR_ERR(nn->port); in nfp_app_nic_vnic_init_phy_port()
23 err = nfp_port_init_phy_port(pf, app, nn->port, id); in nfp_app_nic_vnic_init_phy_port()
25 nfp_port_free(nn->port); in nfp_app_nic_vnic_init_phy_port()
29 return nn->port->type == NFP_PORT_INVALID; in nfp_app_nic_vnic_init_phy_port()
32 int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, in nfp_app_nic_vnic_alloc() argument
37 err = nfp_app_nic_vnic_init_phy_port(app->pf, app, nn, id); in nfp_app_nic_vnic_alloc()
41 nfp_net_get_mac_addr(app->pf, nn->dp.netdev, nn->port); in nfp_app_nic_vnic_alloc()
nfp_net_dp.c
57 struct nfp_net *nn = r_vec->nfp_net; in nfp_net_tx_ring_init() local
64 tx_ring->qcidx = tx_ring->idx * nn->stride_tx; in nfp_net_tx_ring_init()
66 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); in nfp_net_tx_ring_init()
79 struct nfp_net *nn = r_vec->nfp_net; in nfp_net_rx_ring_init() local
85 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; in nfp_net_rx_ring_init()
86 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx); in nfp_net_rx_ring_init()
183 int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp) in nfp_net_tx_rings_prepare() argument
207 &nn->r_vecs[r - bias], r, bias); in nfp_net_tx_rings_prepare()
339 int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp) in nfp_net_rx_rings_prepare() argument
349 nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r); in nfp_net_rx_rings_prepare()
[all …]
nfp_net_debugfs.c
18 struct nfp_net *nn; in nfp_rx_q_show() local
26 nn = r_vec->nfp_net; in nfp_rx_q_show()
28 if (!nfp_net_running(nn)) in nfp_rx_q_show()
84 struct nfp_net *nn; in nfp_tx_q_show() local
95 nn = r_vec->nfp_net; in nfp_tx_q_show()
96 if (!nfp_net_running(nn)) in nfp_tx_q_show()
111 nfp_net_debugfs_print_tx_descs(file, &nn->dp, r_vec, tx_ring, in nfp_tx_q_show()
124 void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir) in nfp_net_debugfs_vnic_add() argument
133 if (nfp_net_is_data_vnic(nn)) in nfp_net_debugfs_vnic_add()
134 sprintf(name, "vnic%d", nn->id); in nfp_net_debugfs_vnic_add()
[all …]
nfp_app.h
93 const char *(*extra_cap)(struct nfp_app *app, struct nfp_net *nn);
98 int (*vnic_alloc)(struct nfp_app *app, struct nfp_net *nn,
100 void (*vnic_free)(struct nfp_app *app, struct nfp_net *nn);
101 int (*vnic_init)(struct nfp_app *app, struct nfp_net *nn);
102 void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn);
134 int (*bpf)(struct nfp_app *app, struct nfp_net *nn,
136 int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
188 bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
189 bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
207 static inline int nfp_app_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, in nfp_app_vnic_alloc() argument
[all …]
nfp_net_xsk.c
113 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_xsk_setup_pool() local
120 if (nn->dp.ops->version == NFP_NFD_VER_NFDK) in nfp_net_xsk_setup_pool()
124 if (nn->dp.rx_offset != NFP_NET_CFG_RX_OFFSET_DYNAMIC) in nfp_net_xsk_setup_pool()
126 if (!nn->dp.chained_metadata_format) in nfp_net_xsk_setup_pool()
131 err = nfp_net_xsk_pool_map(nn->dp.dev, pool); in nfp_net_xsk_setup_pool()
137 dp = nfp_net_clone_dp(nn); in nfp_net_xsk_setup_pool()
146 err = nfp_net_ring_reconfig(nn, dp, NULL); in nfp_net_xsk_setup_pool()
152 nfp_net_xsk_pool_unmap(nn->dp.dev, prev_pool); in nfp_net_xsk_setup_pool()
157 nfp_net_xsk_pool_unmap(nn->dp.dev, pool); in nfp_net_xsk_setup_pool()
164 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_xsk_wakeup() local
[all …]
ccm.h
114 int nfp_ccm_mbox_alloc(struct nfp_net *nn);
115 void nfp_ccm_mbox_free(struct nfp_net *nn);
116 int nfp_ccm_mbox_init(struct nfp_net *nn);
117 void nfp_ccm_mbox_clean(struct nfp_net *nn);
118 bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size);
120 nfp_ccm_mbox_msg_alloc(struct nfp_net *nn, unsigned int req_size,
122 int __nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
126 int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
130 int nfp_ccm_mbox_post(struct nfp_net *nn, struct sk_buff *skb,
/linux-6.1.9/fs/nfsd/
nfssvc.c
182 nfsd_netns_free_versions(struct nfsd_net *nn) in nfsd_netns_free_versions() argument
184 kfree(nn->nfsd_versions); in nfsd_netns_free_versions()
185 kfree(nn->nfsd4_minorversions); in nfsd_netns_free_versions()
186 nn->nfsd_versions = NULL; in nfsd_netns_free_versions()
187 nn->nfsd4_minorversions = NULL; in nfsd_netns_free_versions()
191 nfsd_netns_init_versions(struct nfsd_net *nn) in nfsd_netns_init_versions() argument
193 if (!nn->nfsd_versions) { in nfsd_netns_init_versions()
194 nn->nfsd_versions = nfsd_alloc_versions(); in nfsd_netns_init_versions()
195 nn->nfsd4_minorversions = nfsd_alloc_minorversions(); in nfsd_netns_init_versions()
196 if (!nn->nfsd_versions || !nn->nfsd4_minorversions) in nfsd_netns_init_versions()
[all …]
nfscache.c
89 struct nfsd_net *nn) in nfsd_reply_cache_alloc() argument
115 struct nfsd_net *nn) in nfsd_reply_cache_free_locked() argument
118 nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len); in nfsd_reply_cache_free_locked()
124 atomic_dec(&nn->num_drc_entries); in nfsd_reply_cache_free_locked()
125 nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp)); in nfsd_reply_cache_free_locked()
132 struct nfsd_net *nn) in nfsd_reply_cache_free() argument
135 nfsd_reply_cache_free_locked(b, rp, nn); in nfsd_reply_cache_free()
151 static int nfsd_reply_cache_stats_init(struct nfsd_net *nn) in nfsd_reply_cache_stats_init() argument
153 return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM); in nfsd_reply_cache_stats_init()
156 static void nfsd_reply_cache_stats_destroy(struct nfsd_net *nn) in nfsd_reply_cache_stats_destroy() argument
[all …]
nfs4recover.c
170 const char *dname, int len, struct nfsd_net *nn) in __nfsd4_create_reclaim_record_grace() argument
183 crp = nfs4_client_to_reclaim(name, princhash, nn); in __nfsd4_create_reclaim_record_grace()
198 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in nfsd4_create_clid_dir() local
202 if (!nn->rec_file) in nfsd4_create_clid_dir()
213 status = mnt_want_write_file(nn->rec_file); in nfsd4_create_clid_dir()
217 dir = nn->rec_file->f_path.dentry; in nfsd4_create_clid_dir()
242 if (nn->in_grace) in nfsd4_create_clid_dir()
244 HEXDIR_LEN, nn); in nfsd4_create_clid_dir()
245 vfs_fsync(nn->rec_file, 0); in nfsd4_create_clid_dir()
252 mnt_drop_write_file(nn->rec_file); in nfsd4_create_clid_dir()
[all …]
nfsctl.c
147 struct nfsd_net *nn = net_generic(net, nfsd_net_id); in exports_net_open() local
154 seq->private = nn->svc_export_cache; in exports_net_open()
524 nfsd_print_version_support(struct nfsd_net *nn, char *buf, int remaining, in nfsd_print_version_support() argument
528 bool supported = !!nfsd_vers(nn, vers, NFSD_TEST); in nfsd_print_version_support()
531 !nfsd_minorversion(nn, minor, NFSD_TEST)) in nfsd_print_version_support()
551 struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id); in __write_versions() local
554 if (nn->nfsd_serv) in __write_versions()
586 nfsd_vers(nn, num, cmd); in __write_versions()
590 if (nfsd_minorversion(nn, minor, cmd) < 0) in __write_versions()
592 } else if ((cmd == NFSD_SET) != nfsd_vers(nn, num, NFSD_TEST)) { in __write_versions()
[all …]
nfs4state.c
85 void nfsd4_end_grace(struct nfsd_net *nn);
86 static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
163 static void nfsd4_dec_courtesy_client_count(struct nfsd_net *nn, in nfsd4_dec_courtesy_client_count() argument
167 atomic_add_unless(&nn->nfsd_courtesy_clients, -1, 0); in nfsd4_dec_courtesy_client_count()
172 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in get_client_locked() local
174 lockdep_assert_held(&nn->client_lock); in get_client_locked()
179 nfsd4_dec_courtesy_client_count(nn, clp); in get_client_locked()
188 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in renew_client_locked() local
199 list_move_tail(&clp->cl_lru, &nn->client_lru); in renew_client_locked()
201 nfsd4_dec_courtesy_client_count(nn, clp); in renew_client_locked()
[all …]
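
Note: across the fs/nfsd hits above, nn is the per-network-namespace nfsd state, looked up with net_generic() and the nfsd_net_id pernet key (see the nfsctl.c and nfs4state.c excerpts). A minimal sketch of that lookup pattern; the wrapper name is hypothetical:

/* Hypothetical wrapper, illustration only: resolve the per-netns
 * struct nfsd_net the same way the excerpts above do inline. */
static struct nfsd_net *example_nfsd_net(struct net *net)
{
	return net_generic(net, nfsd_net_id);
}
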
/linux-6.1.9/drivers/net/ethernet/netronome/nfp/crypto/
tls.c
31 static void nfp_net_crypto_set_op(struct nfp_net *nn, u8 opcode, bool on) in nfp_net_crypto_set_op() argument
35 off = nn->tlv_caps.crypto_enable_off + round_down(opcode / 8, 4); in nfp_net_crypto_set_op()
37 val = nn_readl(nn, off); in nfp_net_crypto_set_op()
42 nn_writel(nn, off, val); in nfp_net_crypto_set_op()
46 __nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add, in __nfp_net_tls_conn_cnt_changed() argument
54 nn->ktls_tx_conn_cnt += add; in __nfp_net_tls_conn_cnt_changed()
55 cnt = nn->ktls_tx_conn_cnt; in __nfp_net_tls_conn_cnt_changed()
56 nn->dp.ktls_tx = !!nn->ktls_tx_conn_cnt; in __nfp_net_tls_conn_cnt_changed()
59 nn->ktls_rx_conn_cnt += add; in __nfp_net_tls_conn_cnt_changed()
60 cnt = nn->ktls_rx_conn_cnt; in __nfp_net_tls_conn_cnt_changed()
[all …]
/linux-6.1.9/drivers/fpga/
dfl-n3000-nios.c
167 struct n3000_nios *nn = dev_get_drvdata(dev); in nios_fw_version_show() local
171 ret = regmap_read(nn->regmap, N3000_NIOS_FW_VERSION, &val); in nios_fw_version_show()
190 static int get_retimer_mode(struct n3000_nios *nn, unsigned int mode_stat_reg, in get_retimer_mode() argument
196 ret = regmap_read(nn->regmap, mode_stat_reg, &val); in get_retimer_mode()
211 struct n3000_nios *nn = dev_get_drvdata(dev); in retimer_A_mode_show() local
215 ret = get_retimer_mode(nn, N3000_NIOS_PKVL_A_MODE_STS, &mode); in retimer_A_mode_show()
226 struct n3000_nios *nn = dev_get_drvdata(dev); in retimer_B_mode_show() local
230 ret = get_retimer_mode(nn, N3000_NIOS_PKVL_B_MODE_STS, &mode); in retimer_B_mode_show()
242 struct n3000_nios *nn = dev_get_drvdata(dev); in fec_mode_show() local
246 ret = regmap_read(nn->regmap, N3000_NIOS_FW_VERSION, &val); in fec_mode_show()
[all …]
/linux-6.1.9/fs/nfs/blocklayout/
rpc_pipefs.c
56 struct nfs_net *nn = net_generic(net, nfs_net_id); in bl_resolve_deviceid() local
57 struct bl_dev_msg *reply = &nn->bl_mount_reply; in bl_resolve_deviceid()
67 mutex_lock(&nn->bl_mutex); in bl_resolve_deviceid()
68 bl_pipe_msg.bl_wq = &nn->bl_wq; in bl_resolve_deviceid()
86 add_wait_queue(&nn->bl_wq, &wq); in bl_resolve_deviceid()
87 rc = rpc_queue_upcall(nn->bl_device_pipe, msg); in bl_resolve_deviceid()
89 remove_wait_queue(&nn->bl_wq, &wq); in bl_resolve_deviceid()
95 remove_wait_queue(&nn->bl_wq, &wq); in bl_resolve_deviceid()
107 mutex_unlock(&nn->bl_mutex); in bl_resolve_deviceid()
114 struct nfs_net *nn = net_generic(file_inode(filp)->i_sb->s_fs_info, in bl_pipe_downcall() local
[all …]
/linux-6.1.9/fs/ocfs2/cluster/
tcp.c
282 static u8 o2net_num_from_nn(struct o2net_node *nn) in o2net_num_from_nn() argument
284 BUG_ON(nn == NULL); in o2net_num_from_nn()
285 return nn - o2net_nodes; in o2net_num_from_nn()
290 static int o2net_prep_nsw(struct o2net_node *nn, struct o2net_status_wait *nsw) in o2net_prep_nsw() argument
294 spin_lock(&nn->nn_lock); in o2net_prep_nsw()
295 ret = idr_alloc(&nn->nn_status_idr, nsw, 0, 0, GFP_ATOMIC); in o2net_prep_nsw()
298 list_add_tail(&nsw->ns_node_item, &nn->nn_status_list); in o2net_prep_nsw()
300 spin_unlock(&nn->nn_lock); in o2net_prep_nsw()
310 static void o2net_complete_nsw_locked(struct o2net_node *nn, in o2net_complete_nsw_locked() argument
315 assert_spin_locked(&nn->nn_lock); in o2net_complete_nsw_locked()
[all …]
/linux-6.1.9/lib/reed_solomon/
decode_rs.c
16 int nn = rs->nn; variable
26 uint16_t msk = (uint16_t) rs->nn;
42 pad = nn - nroots - len;
43 BUG_ON(pad < 0 || pad >= nn - nroots);
51 if (s[i] != nn)
112 prim * (nn - 1 - (eras_pos[0] + pad)))];
114 u = rs_modnn(rs, prim * (nn - 1 - (eras_pos[i] + pad)));
117 if (tmp != nn) {
138 if ((lambda[i] != 0) && (s[r - i - 1] != nn)) {
146 if (discr_r == nn) {
[all …]
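
Note: in the lib/reed_solomon hit above, nn is the full codeword length in symbols (2^m - 1 for m-bit symbols), and "pad = nn - nroots - len" shortens the code by treating the missing leading symbols as zeros. A small worked example under an assumed RS(255, 223) configuration; only the pad formula itself comes from the excerpt:

/* Assumed parameters for illustration: 8-bit symbols, 32 parity symbols. */
int nn     = (1 << 8) - 1;       /* 255 symbols per full codeword  */
int nroots = 32;                 /* parity (check) symbols         */
int len    = 100;                /* data symbols actually supplied */
int pad    = nn - nroots - len;  /* implicit leading zeros: 123    */
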
/linux-6.1.9/drivers/net/ethernet/netronome/nfp/bpf/
main.c
24 static bool nfp_net_ebpf_capable(struct nfp_net *nn) in nfp_net_ebpf_capable() argument
27 struct nfp_app_bpf *bpf = nn->app->priv; in nfp_net_ebpf_capable()
29 return nn->cap & NFP_NET_CFG_CTRL_BPF && in nfp_net_ebpf_capable()
31 nn_readb(nn, NFP_NET_CFG_BPF_ABI) == bpf->abi_version; in nfp_net_ebpf_capable()
38 nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn, in nfp_bpf_xdp_offload() argument
43 if (!nfp_net_ebpf_capable(nn)) in nfp_bpf_xdp_offload()
46 running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF; in nfp_bpf_xdp_offload()
47 xdp_running = running && nn->xdp_hw.prog; in nfp_bpf_xdp_offload()
54 return nfp_net_bpf_offload(nn, prog, running, extack); in nfp_bpf_xdp_offload()
57 static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn) in nfp_bpf_extra_cap() argument
[all …]
offload.c
221 struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev); in nfp_bpf_translate() local
230 max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN); in nfp_bpf_translate()
425 int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf) in nfp_ndo_bpf() argument
484 bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog, in nfp_bpf_offload_check_mtu() argument
489 fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; in nfp_bpf_offload_check_mtu()
496 nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog, in nfp_net_bpf_load() argument
505 if (nfp_bpf_offload_check_mtu(nn, prog, nn->dp.netdev->mtu)) { in nfp_net_bpf_load()
510 max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64; in nfp_net_bpf_load()
516 max_prog_len = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN); in nfp_net_bpf_load()
522 img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv); in nfp_net_bpf_load()
[all …]
/linux-6.1.9/fs/nfs/
client.c
201 struct nfs_net *nn = net_generic(net, nfs_net_id); in nfs_cleanup_cb_ident_idr() local
203 idr_destroy(&nn->cb_ident_idr); in nfs_cleanup_cb_ident_idr()
209 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); in nfs_cb_idr_remove_locked() local
212 idr_remove(&nn->cb_ident_idr, clp->cl_cb_ident); in nfs_cb_idr_remove_locked()
257 struct nfs_net *nn; in nfs_put_client() local
262 nn = net_generic(clp->cl_net, nfs_net_id); in nfs_put_client()
264 if (refcount_dec_and_lock(&clp->cl_count, &nn->nfs_client_lock)) { in nfs_put_client()
267 spin_unlock(&nn->nfs_client_lock); in nfs_put_client()
284 struct nfs_net *nn = net_generic(data->net, nfs_net_id); in nfs_match_client() local
288 list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) { in nfs_match_client()
[all …]
