// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_vec.c: Definition of the common structure for a vector of Rx and Tx
 * rings. Definition of functions for Rx and Tx rings. Companion module for
 * aq_nic.
 */

#include "aq_vec.h"

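/* Per-interrupt-vector context: one NAPI instance driving up to
 * AQ_CFG_TCS_MAX Tx/Rx ring pairs (one pair per traffic class).
 */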
struct aq_vec_s {
	const struct aq_hw_ops *aq_hw_ops;
	struct aq_hw_s *aq_hw;
	struct aq_nic_s *aq_nic;
	unsigned int tx_rings;
	unsigned int rx_rings;
	struct aq_ring_param_s aq_ring_param;
	struct napi_struct napi;
	struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
};

#define AQ_VEC_TX_ID 0
#define AQ_VEC_RX_ID 1

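/* NAPI poll callback: for each ring pair owned by this vector, reclaim
 * completed Tx descriptors, then receive and refill Rx descriptors, up to
 * @budget Rx packets. Re-enables the vector's interrupt once less than the
 * full budget was consumed.
 */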
static int aq_vec_poll(struct napi_struct *napi, int budget)
{
	struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
	unsigned int sw_tail_old = 0U;
	struct aq_ring_s *ring = NULL;
	bool was_tx_cleaned = true;
	unsigned int i = 0U;
	int work_done = 0;
	int err = 0;

	if (!self) {
		err = -EINVAL;
	} else {
		for (i = 0U; self->tx_rings > i; ++i) {
			ring = self->ring[i];
			u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
			ring[AQ_VEC_RX_ID].stats.rx.polls++;
			u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
			if (self->aq_hw_ops->hw_ring_tx_head_update) {
				err = self->aq_hw_ops->hw_ring_tx_head_update(
							self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
				if (err < 0)
					goto err_exit;
			}

			if (ring[AQ_VEC_TX_ID].sw_head !=
			    ring[AQ_VEC_TX_ID].hw_head) {
				was_tx_cleaned = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
				aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
			}

			err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
					&ring[AQ_VEC_RX_ID]);
			if (err < 0)
				goto err_exit;

			if (ring[AQ_VEC_RX_ID].sw_head !=
			    ring[AQ_VEC_RX_ID].hw_head) {
				err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
						       napi,
						       &work_done,
						       budget - work_done);
				if (err < 0)
					goto err_exit;

				sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;

				err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
				if (err < 0)
					goto err_exit;

				err = self->aq_hw_ops->hw_ring_rx_fill(
					self->aq_hw,
					&ring[AQ_VEC_RX_ID], sw_tail_old);
				if (err < 0)
					goto err_exit;
			}
		}

err_exit:
		if (!was_tx_cleaned)
			work_done = budget;

		if (work_done < budget) {
			napi_complete_done(napi, work_done);
			self->aq_hw_ops->hw_irq_enable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
		}
	}

	return work_done;
}

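/* Allocate a vector object, bind it to the CPU derived from the vector index
 * and register its NAPI instance. Returns NULL on allocation failure.
 */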
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
			      struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_vec_s *self = NULL;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self)
		goto err_exit;

	self->aq_nic = aq_nic;
	self->aq_ring_param.vec_idx = idx;
	self->aq_ring_param.cpu =
		idx + aq_nic_cfg->aq_rss.base_cpu_number;

	cpumask_set_cpu(self->aq_ring_param.cpu,
			&self->aq_ring_param.affinity_mask);

	self->tx_rings = 0;
	self->rx_rings = 0;

	netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi, aq_vec_poll);

err_exit:
	return self;
}

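/* Allocate one Tx/Rx ring pair per configured traffic class and register the
 * Rx ring's XDP memory model. On failure, all rings allocated so far are
 * released via aq_vec_ring_free().
 */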
int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
		      unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0; i < aq_nic_cfg->tcs; ++i) {
		const unsigned int idx_ring = AQ_NIC_CFG_TCVEC2RING(aq_nic_cfg,
								    i, idx);

		ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->tx_rings;

		aq_nic_set_tx_ring(aq_nic, idx_ring, ring);

		if (xdp_rxq_info_reg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
				     aq_nic->ndev, idx,
				     self->napi.napi_id) < 0) {
			err = -ENOMEM;
			goto err_exit;
		}
		if (xdp_rxq_info_reg_mem_model(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
					       MEM_TYPE_PAGE_SHARED, NULL) < 0) {
			xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
			err = -ENOMEM;
			goto err_exit;
		}

		ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
			err = -ENOMEM;
			goto err_exit;
		}

		++self->rx_rings;
	}

err_exit:
	if (err < 0) {
		aq_vec_ring_free(self);
		self = NULL;
	}

	return err;
}

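/* Bind the vector to the hardware layer and initialise every ring pair in
 * both software and hardware, pre-filling the Rx rings with buffers.
 */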
int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
		struct aq_hw_s *aq_hw)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->aq_hw_ops = aq_hw_ops;
	self->aq_hw = aq_hw;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
						       &ring[AQ_VEC_TX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_init(&ring[AQ_VEC_RX_ID], ATL_RING_RX);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
						       &ring[AQ_VEC_RX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
						       &ring[AQ_VEC_RX_ID], 0U);
		if (err < 0)
			goto err_exit;
	}

err_exit:
	return err;
}

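/* Start Tx and Rx DMA for every ring pair and enable NAPI polling. */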
int aq_vec_start(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
							&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;
	}

	napi_enable(&self->napi);

err_exit:
	return err;
}

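/* Stop Tx and Rx DMA for every ring pair and disable NAPI polling. */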
void aq_vec_stop(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
						 &ring[AQ_VEC_TX_ID]);

		self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
						 &ring[AQ_VEC_RX_ID]);
	}

	napi_disable(&self->napi);
}

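/* Release buffers still held by the rings: reclaim pending Tx descriptors
 * and drop the Rx fill buffers.
 */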
void aq_vec_deinit(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
		aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
	}

err_exit:;
}

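/* Unregister the NAPI instance and free the vector object itself. */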
void aq_vec_free(struct aq_vec_s *self)
{
	if (!self)
		goto err_exit;

	netif_napi_del(&self->napi);

	kfree(self);

err_exit:;
}

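/* Free every ring pair owned by the vector, unregistering the Rx XDP info
 * for rings whose Rx allocation completed.
 */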
void aq_vec_ring_free(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		aq_ring_free(&ring[AQ_VEC_TX_ID]);
		if (i < self->rx_rings) {
			xdp_rxq_info_unreg(&ring[AQ_VEC_RX_ID].xdp_rxq);
			aq_ring_free(&ring[AQ_VEC_RX_ID]);
		}
	}

	self->tx_rings = 0;
	self->rx_rings = 0;
err_exit:;
}

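/* MSI/MSI-X interrupt handler: simply schedule NAPI for this vector. */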
irqreturn_t aq_vec_isr(int irq, void *private)
{
	struct aq_vec_s *self = private;
	int err = 0;

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	napi_schedule(&self->napi);

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}

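/* Legacy (INTx) interrupt handler: the line may be shared, so read the
 * interrupt cause first; if this vector raised it, mask it and schedule
 * NAPI, otherwise re-enable and report IRQ_NONE.
 */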
irqreturn_t aq_vec_isr_legacy(int irq, void *private)
{
	struct aq_vec_s *self = private;
	u64 irq_mask = 0U;
	int err;

	if (!self)
		return IRQ_NONE;
	err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
	if (err < 0)
		return IRQ_NONE;

	if (irq_mask) {
		self->aq_hw_ops->hw_irq_disable(self->aq_hw,
						1U << self->aq_ring_param.vec_idx);
		napi_schedule(&self->napi);
	} else {
		self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

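/* Return the CPU affinity mask computed for this vector at allocation time. */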
cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
{
	return &self->aq_ring_param.affinity_mask;
}

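/* A traffic class is valid for this vector only if both its Rx and Tx rings
 * were successfully allocated.
 */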
bool aq_vec_is_valid_tc(struct aq_vec_s *self, const unsigned int tc)
{
	return tc < self->rx_rings && tc < self->tx_rings;
}

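/* Copy the Rx then Tx software counters of the given traffic class into
 * @data and return the number of u64 entries written.
 */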
unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data)
{
	unsigned int count;

	if (!aq_vec_is_valid_tc(self, tc))
		return 0;

	count = aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_RX_ID], data);
	count += aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_TX_ID], data + count);

	return count;
}