/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE 0x1ae0
#endif

#define PCI_DEV_ID_GVNIC 0x0042

#define GVE_REGISTER_BAR 0
#define GVE_DOORBELL_BAR 2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC 4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM 6
#define GVE_RX_STATS_REPORT_NUM 2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD 20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM 0
#define NIC_RX_STATS_REPORT_NUM 4

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES 1024

#define GVE_RX_BUFFER_SIZE_DQO 2048

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
	u8 can_flip;
};
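
/* Illustrative sketch (not part of the driver API): pagecnt_bias tracks how
 * many page references the driver itself expects to hold. A half-page buffer
 * can only be flipped and reposted to the device once the stack has dropped
 * its references, which a consumer could check roughly as
 *
 *	reusable = page_count(page_info->page) == page_info->pagecnt_bias;
 *
 * can_flip records whether this recycling scheme is allowed for the slot.
 */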

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};
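
/* Illustrative helper (hypothetical name, not used by the driver): with the
 * head/tail/mask convention above, the number of buffers currently posted to
 * HW but not yet cleaned is the masked head-to-tail distance on the ring.
 */
static inline u32
gve_example_rx_bufs_outstanding(const struct gve_rx_buf_queue_dqo *bufq)
{
	return (bufq->tail - bufq->head) & bufq->mask;
}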

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of slots which did not have a buffer posted yet. We should not
	 * post more buffers than the queue size to avoid HW overrunning the
	 * queue.
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};
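
/* Illustrative sketch (the completion descriptor's field name is an
 * assumption, see gve_desc_dqo.h): SW detects a new completion by comparing
 * the descriptor's generation bit against cur_gen_bit, e.g.
 *
 *	if (compl_desc->generation == complq->cur_gen_bit)
 *		break;
 *
 * and toggles cur_gen_bit each time head wraps past the end of the ring.
 */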

/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
	s16 head;
	s16 tail;
};
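
/* Illustrative helper (hypothetical, not part of the driver): the index lists
 * above chain elements through their `next` fields, so popping the head of a
 * list of gve_rx_buf_state_dqo entries looks roughly like this.
 */
static inline s16 gve_example_index_list_pop(struct gve_index_list *list,
					     struct gve_rx_buf_state_dqo *buf_states)
{
	s16 idx = list->head;

	if (idx == -1)
		return -1;
	list->head = buf_states[idx].next;
	if (list->head == -1)
		list->tail = -1;
	return idx;
}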

/* A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
	/* head and tail of skb chain for the current packet or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
	u16 total_expected_size;
	u8 expected_frag_cnt;
	u8 curr_frag_cnt;
	u8 reuse_frags;
};

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;
	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;
			u16 packet_buffer_size;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;
		} dqo;
	};

	u64 rbytes; /* free-running bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied into skb linear portion */
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */

	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
};

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	struct sk_buff *skb; /* skb for this pkt */
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma);
			DEFINE_DMA_UNMAP_LEN(len);
		};
	};
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};

/* TX descriptor for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
	/* Packet is in free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet has received a miss completion and is expecting a
	 * re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
};

struct gve_tx_pending_packet_dqo {
	struct sk_buff *skb; /* skb for this packet */

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`.
	 */
	DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
	DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;

	/* Linked list index to prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceeds timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};
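
/* Illustrative sketch (hypothetical helper, not the driver's own routine):
 * on completion, buffer 0 covers the skb's linear area and is unmapped with
 * dma_unmap_single(), while the remaining buffers cover frags and are
 * unmapped with dma_unmap_page().
 */
static inline void
gve_example_unmap_pending_packet(struct device *dev,
				 struct gve_tx_pending_packet_dqo *pkt)
{
	int i;

	dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
			 dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
	for (i = 1; i < pkt->num_bufs; i++)
		dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
			       dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
	pkt->num_bufs = 0;
}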

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets).
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;
			u32 tail; /* Last posted buffer index + 1 */

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	union {
		/* GQI fields */
		struct {
			/* Spinlock for when cleanup in progress */
			spinlock_t clean_lock;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read on compl_desc */

			/* Tracks the current gen bit of compl_q */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) runs out, this list
			 * is stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			/* List to track pending packets which received a miss
			 * completion but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;
		} dqo_compl;
	} ____cacheline_aligned;
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u32 complq_mask; /* complq size is complq_mask + 1 */
		} dqo;
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 queue_timeout; /* count of queue timeouts */
	u32 ntfy_id; /* notification block index */
	u32 last_kick_msec; /* Last time the queue was kicked */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
} ____cacheline_aligned;

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 *irq_db_index; /* pointer to idx into Bar2 */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
};

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_options_dqo_rda {
	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};

struct gve_irq_db {
	__be32 index;
} ____cacheline_aligned;

struct gve_ptype {
	u8 l3_type; /* `gve_l3_type` in gve_adminq.h */
	u8 l4_type; /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED = 0x0,
	GVE_GQI_RDA_FORMAT = 0x1,
	GVE_GQI_QPL_FORMAT = 0x2,
	GVE_DQO_RDA_FORMAT = 0x3,
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_queue_page_list *qpls; /* array of num qpls */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
	dma_addr_t irq_db_indices_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 tx_pages_per_qpl; /* tx buffer length */
	u16 rx_data_slot_cnt; /* rx buffer length */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable; /* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
	/* free-running count of each AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_get_ptype_map_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of reset */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	u32 suspend_cnt; /* count of times suspended */
	u32 resume_cnt; /* count of times resumed */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;
	bool up_before_suspend; /* True if dev was up before suspend */

	struct gve_options_dqo_rda options_dqo_rda;
	struct gve_ptype_lut *ptype_lut_dqo;

	/* Must be a power of two. */
	int data_buffer_size_dqo;

	enum gve_queue_format queue_format;

	/* Interrupt coalescing settings */
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET = 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS = 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK = 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED = 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS = 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the ntfy_blocks irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
}
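
/* Illustrative usage (sketch, not a guaranteed API): callers write a
 * big-endian value to this address to ack/mask/unmask the block's interrupt,
 * along the lines of
 *
 *	iowrite32be(val, gve_irq_doorbell(priv, block));
 *
 * where `val` encodes the desired interrupt doorbell state.
 */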

/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}

/* Returns the number of tx queue page lists
 */
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->tx_cfg.num_queues;
}

/* Returns the number of rx queue page lists
 */
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->rx_cfg.num_queues;
}

/* Returns a pointer to the next available tx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
{
	int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
				     priv->qpl_cfg.qpl_map_size);

	/* we are out of tx qpls */
	if (id >= gve_num_tx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Returns a pointer to the next available rx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
{
	int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
				    priv->qpl_cfg.qpl_map_size,
				    gve_num_tx_qpls(priv));

	/* we are out of rx qpls */
	if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Unassigns the qpl with the given id
 */
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}

/* Returns the correct dma direction for tx and rx qpls
 */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						       int id)
{
	if (id < gve_num_tx_qpls(priv))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}
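
/* Illustrative sketch: QPL ids come from a single bitmap, with TX ids
 * occupying [0, gve_num_tx_qpls()) and RX ids following them, which is why
 * gve_qpl_dma_dir() can derive the DMA direction from the id alone.
 * Hypothetical usage when setting up a TX queue:
 *
 *	struct gve_queue_page_list *qpl = gve_assign_tx_qpl(priv);
 *
 *	if (!qpl)
 *		return -ENOMEM;
 *	...
 *	gve_unassign_qpl(priv, qpl->id);
 */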

static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings_gqi(struct gve_priv *priv);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern const char gve_version_str[];
#endif /* _GVE_H_ */