/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Tracepoints for Thunderbolt/USB4 networking driver
 *
 * Copyright (C) 2023, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM thunderbolt_net

#if !defined(__TRACE_THUNDERBOLT_NET_H) || defined(TRACE_HEADER_MULTI_READ)
#define __TRACE_THUNDERBOLT_NET_H

#include <linux/dma-direction.h>
#include <linux/skbuff.h>
#include <linux/tracepoint.h>

#define DMA_DATA_DIRECTION_NAMES			\
	{ DMA_BIDIRECTIONAL, "DMA_BIDIRECTIONAL" },	\
	{ DMA_TO_DEVICE, "DMA_TO_DEVICE" },		\
	{ DMA_FROM_DEVICE, "DMA_FROM_DEVICE" },		\
	{ DMA_NONE, "DMA_NONE" }

/* Allocation and release of the per-frame DMA buffers */
DECLARE_EVENT_CLASS(tbnet_frame,
	TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
		 enum dma_data_direction dir),
	TP_ARGS(index, page, phys, dir),
	TP_STRUCT__entry(
		__field(unsigned int, index)
		__field(const void *, page)
		__field(dma_addr_t, phys)
		__field(enum dma_data_direction, dir)
	),
	TP_fast_assign(
		__entry->index = index;
		__entry->page = page;
		__entry->phys = phys;
		__entry->dir = dir;
	),
	TP_printk("index=%u page=%p phys=%pad dir=%s",
		  __entry->index, __entry->page, &__entry->phys,
		  __print_symbolic(__entry->dir, DMA_DATA_DIRECTION_NAMES))
);

DEFINE_EVENT(tbnet_frame, tbnet_alloc_rx_frame,
	TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
		 enum dma_data_direction dir),
	TP_ARGS(index, page, phys, dir)
);

DEFINE_EVENT(tbnet_frame, tbnet_alloc_tx_frame,
	TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
		 enum dma_data_direction dir),
	TP_ARGS(index, page, phys, dir)
);

DEFINE_EVENT(tbnet_frame, tbnet_free_frame,
	TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
		 enum dma_data_direction dir),
	TP_ARGS(index, page, phys, dir)
);

/* ThunderboltIP frame header fields (size, id, index, count) of a data frame */
DECLARE_EVENT_CLASS(tbnet_ip_frame,
	TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
	TP_ARGS(size, id, index, count),
	TP_STRUCT__entry(
		__field(u32, size)
		__field(u16, id)
		__field(u16, index)
		__field(u32, count)
	),
	TP_fast_assign(
		__entry->size = le32_to_cpu(size);
		__entry->id = le16_to_cpu(id);
		__entry->index = le16_to_cpu(index);
		__entry->count = le32_to_cpu(count);
	),
	TP_printk("id=%u size=%u index=%u count=%u",
		  __entry->id, __entry->size, __entry->index, __entry->count)
);

DEFINE_EVENT(tbnet_ip_frame, tbnet_rx_ip_frame,
	TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
	TP_ARGS(size, id, index, count)
);

DEFINE_EVENT(tbnet_ip_frame, tbnet_invalid_rx_ip_frame,
	TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
	TP_ARGS(size, id, index, count)
);

DEFINE_EVENT(tbnet_ip_frame, tbnet_tx_ip_frame,
	TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
	TP_ARGS(size, id, index, count)
);

/* Hand-off of a complete sk_buff to or from the networking stack */
DECLARE_EVENT_CLASS(tbnet_skb,
	TP_PROTO(const struct sk_buff *skb),
	TP_ARGS(skb),
	TP_STRUCT__entry(
		__field(const void *, addr)
		__field(unsigned int, len)
		__field(unsigned int, data_len)
		__field(unsigned int, nr_frags)
	),
	TP_fast_assign(
		__entry->addr = skb;
		__entry->len = skb->len;
		__entry->data_len = skb->data_len;
		__entry->nr_frags = skb_shinfo(skb)->nr_frags;
	),
	TP_printk("skb=%p len=%u data_len=%u nr_frags=%u",
		  __entry->addr, __entry->len, __entry->data_len,
		  __entry->nr_frags)
);

DEFINE_EVENT(tbnet_skb, tbnet_rx_skb,
	TP_PROTO(const struct sk_buff *skb),
	TP_ARGS(skb)
);

DEFINE_EVENT(tbnet_skb, tbnet_tx_skb,
	TP_PROTO(const struct sk_buff *skb),
	TP_ARGS(skb)
);

DEFINE_EVENT(tbnet_skb, tbnet_consume_skb,
	TP_PROTO(const struct sk_buff *skb),
	TP_ARGS(skb)
);

#endif /* __TRACE_THUNDERBOLT_NET_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .

#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

#include <trace/define_trace.h>
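
/*
 * Usage sketch (illustrative, not part of this header): exactly one driver
 * C file defines CREATE_TRACE_POINTS before including this header, which
 * instantiates the generated trace_<event>() helpers declared above:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include "trace.h"
 *
 * Call sites then pass the traced values directly, matching each TP_PROTO;
 * the variable names below are assumptions for illustration only:
 *
 *	trace_tbnet_alloc_rx_frame(index, page, dma_addr, DMA_FROM_DEVICE);
 *	trace_tbnet_rx_skb(skb);
 */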