/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA  94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

#include <xfs.h>

static kmem_zone_t *ktrace_hdr_zone;
static kmem_zone_t *ktrace_ent_zone;
static int          ktrace_zentries;

void
ktrace_init(int zentries)
{
	ktrace_zentries = zentries;

	ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
					"ktrace_hdr");
	ASSERT(ktrace_hdr_zone);

	ktrace_ent_zone = kmem_zone_init(ktrace_zentries
					* sizeof(ktrace_entry_t),
					"ktrace_ent");
	ASSERT(ktrace_ent_zone);
}

void
ktrace_uninit(void)
{
	kmem_cache_destroy(ktrace_hdr_zone);
	kmem_cache_destroy(ktrace_ent_zone);
}

/*
 * ktrace_alloc()
 *
 * Allocate a ktrace header and enough buffering for the given
 * number of entries.
 */
ktrace_t *
ktrace_alloc(int nentries, int sleep)
{
	ktrace_t        *ktp;
	ktrace_entry_t  *ktep;

	ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep);

	if (ktp == (ktrace_t*)NULL) {
		/*
		 * KM_SLEEP callers don't expect failure.
		 */
		if (sleep & KM_SLEEP)
			panic("ktrace_alloc: NULL memory on KM_SLEEP request!");

		return NULL;
	}

	/*
	 * Buffers with the default number of entries (ktrace_zentries)
	 * come from the dedicated entry zone.
	 */
	if (nentries == ktrace_zentries) {
		ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone,
							    sleep);
	} else {
		ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)),
							    sleep);
	}

	if (ktep == NULL) {
		/*
		 * KM_SLEEP callers don't expect failure.
		 */
		if (sleep & KM_SLEEP)
			panic("ktrace_alloc: NULL memory on KM_SLEEP request!");

		kmem_free(ktp, sizeof(*ktp));

		return NULL;
	}

	spinlock_init(&(ktp->kt_lock), "kt_lock");

	ktp->kt_entries  = ktep;
	ktp->kt_nentries = nentries;
	ktp->kt_index    = 0;
	ktp->kt_rollover = 0;
	return ktp;
}


/*
 * ktrace_free()
 *
 * Free up the ktrace header and buffer.  It is up to the caller
 * to ensure that no-one is referencing it.
 */
void
ktrace_free(ktrace_t *ktp)
{
	int     entries_size;

	if (ktp == (ktrace_t *)NULL)
		return;

	spinlock_destroy(&ktp->kt_lock);

	/*
	 * Buffers with the default number of entries were allocated
	 * from the dedicated entry zone and must go back to it.
	 */
	if (ktp->kt_nentries == ktrace_zentries) {
		kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
	} else {
		entries_size = (int)(ktp->kt_nentries * sizeof(ktrace_entry_t));

		kmem_free(ktp->kt_entries, entries_size);
	}

	kmem_zone_free(ktrace_hdr_zone, ktp);
}
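
/*
 * Example (editorial addition, not part of the original source): a
 * minimal sketch of the alloc/free lifecycle described above.  The
 * entry count of 64 and the function name are illustrative
 * assumptions; KM_SLEEP is the allocation flag handled in
 * ktrace_alloc().
 */
static void __attribute__((unused))
ktrace_lifecycle_example(void)
{
	ktrace_t	*ktp;

	/* Allocate a header plus room for 64 entries, sleeping if needed. */
	ktp = ktrace_alloc(64, KM_SLEEP);
	if (ktp == NULL)
		return;

	/* ... the buffer would normally live as long as the traced object ... */

	/* Release both the header and the entry array. */
	ktrace_free(ktp);
}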


/*
 * Enter the given values into the "next" entry in the trace buffer.
 * kt_index is always the index of the next entry to be filled.
 */
void
ktrace_enter(
	ktrace_t        *ktp,
	void            *val0,
	void            *val1,
	void            *val2,
	void            *val3,
	void            *val4,
	void            *val5,
	void            *val6,
	void            *val7,
	void            *val8,
	void            *val9,
	void            *val10,
	void            *val11,
	void            *val12,
	void            *val13,
	void            *val14,
	void            *val15)
{
	static lock_t   wrap_lock = SPIN_LOCK_UNLOCKED;
	unsigned long	flags;
	int             index;
	ktrace_entry_t  *ktep;

	ASSERT(ktp != NULL);

	/*
	 * Grab an entry by pushing the index up to the next one.
	 */
	spin_lock_irqsave(&wrap_lock, flags);
	index = ktp->kt_index;
	if (++ktp->kt_index == ktp->kt_nentries)
		ktp->kt_index = 0;
	spin_unlock_irqrestore(&wrap_lock, flags);

	if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
		ktp->kt_rollover = 1;

	ASSERT((index >= 0) && (index < ktp->kt_nentries));

	ktep = &(ktp->kt_entries[index]);

	ktep->val[0]  = val0;
	ktep->val[1]  = val1;
	ktep->val[2]  = val2;
	ktep->val[3]  = val3;
	ktep->val[4]  = val4;
	ktep->val[5]  = val5;
	ktep->val[6]  = val6;
	ktep->val[7]  = val7;
	ktep->val[8]  = val8;
	ktep->val[9]  = val9;
	ktep->val[10] = val10;
	ktep->val[11] = val11;
	ktep->val[12] = val12;
	ktep->val[13] = val13;
	ktep->val[14] = val14;
	ktep->val[15] = val15;
}
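
/*
 * Example (editorial addition, not part of the original source): a
 * sketch of how a caller might record an event.  All sixteen value
 * slots must be supplied, so unused slots are padded with NULL; the
 * function name, event id and object pointer are illustrative
 * assumptions.
 */
static void __attribute__((unused))
ktrace_enter_example(ktrace_t *ktp, void *obj, int event)
{
	ktrace_enter(ktp,
		     (void *)(long)event,	/* what happened */
		     obj,			/* which object it happened to */
		     NULL, NULL, NULL, NULL, NULL,
		     NULL, NULL, NULL, NULL, NULL,
		     NULL, NULL, NULL, NULL);
}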

/*
 * Return the number of entries in the trace buffer.
 */
int
ktrace_nentries(
	ktrace_t        *ktp)
{
	if (ktp == NULL) {
		return 0;
	}

	return (ktp->kt_rollover ? ktp->kt_nentries : ktp->kt_index);
}

/*
 * ktrace_first()
 *
 * This is used to find the start of the trace buffer.
 * In conjunction with ktrace_next() it can be used to
 * iterate through the entire trace buffer.  This code does
 * not do any locking because it is assumed that it is called
 * from the debugger.
 *
 * The caller must pass in a pointer to a ktrace_snap
 * structure in which we will keep some state used to
 * iterate through the buffer.  This state must not be touched
 * by any code outside of this module.
 */
ktrace_entry_t *
ktrace_first(ktrace_t   *ktp, ktrace_snap_t     *ktsp)
{
	ktrace_entry_t  *ktep;
	int             index;
	int             nentries;

	if (ktp->kt_rollover)
		index = ktp->kt_index;
	else
		index = 0;

	ktsp->ks_start = index;
	ktep = &(ktp->kt_entries[index]);

	nentries = ktrace_nentries(ktp);
	index++;
	if (index < nentries) {
		ktsp->ks_index = index;
	} else {
		ktsp->ks_index = 0;
		if (index > nentries)
			ktep = NULL;
	}
	return ktep;
}

/*
 * ktrace_next()
 *
 * This is used to iterate through the entries of the given
 * trace buffer.  The caller must pass in the ktrace_snap_t
 * structure initialized by ktrace_first().  The return value
 * will be either a pointer to the next ktrace_entry or NULL
 * if all of the entries have been traversed.
 */
ktrace_entry_t *
ktrace_next(
	ktrace_t        *ktp,
	ktrace_snap_t   *ktsp)
{
	int             index;
	ktrace_entry_t  *ktep;

	index = ktsp->ks_index;
	if (index == ktsp->ks_start) {
		ktep = NULL;
	} else {
		ktep = &ktp->kt_entries[index];
	}

	index++;
	if (index == ktrace_nentries(ktp)) {
		ktsp->ks_index = 0;
	} else {
		ktsp->ks_index = index;
	}

	return ktep;
}
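
/*
 * Example (editorial addition, not part of the original source): a
 * minimal sketch of walking every recorded entry with ktrace_first()
 * and ktrace_next(), as the comments above describe.  The function
 * name and the printk format are illustrative assumptions.
 */
static void __attribute__((unused))
ktrace_dump_example(ktrace_t *ktp)
{
	ktrace_snap_t	snap;
	ktrace_entry_t	*ktep;

	/* ktrace_first() returns the oldest entry and seeds the snapshot. */
	for (ktep = ktrace_first(ktp, &snap);
	     ktep != NULL;
	     ktep = ktrace_next(ktp, &snap)) {
		printk("ktrace: %p %p %p %p\n",
		       ktep->val[0], ktep->val[1],
		       ktep->val[2], ktep->val[3]);
	}
}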

/*
 * ktrace_skip()
 *
 * Skip the next "count" entries and return the entry after that.
 * Return NULL if this causes us to iterate past the beginning again.
 */
ktrace_entry_t *
ktrace_skip(
	ktrace_t        *ktp,
	int             count,
	ktrace_snap_t   *ktsp)
{
	int             index;
	int             new_index;
	ktrace_entry_t  *ktep;
	int             nentries = ktrace_nentries(ktp);

	index = ktsp->ks_index;
	new_index = index + count;
	while (new_index >= nentries) {
		new_index -= nentries;
	}
	if (index == ktsp->ks_start) {
		/*
		 * We've iterated around to the start, so we're done.
		 */
		ktep = NULL;
	} else if ((new_index < index) && (index < ktsp->ks_index)) {
		/*
		 * We've skipped past the start again, so we're done.
		 */
		ktep = NULL;
		ktsp->ks_index = ktsp->ks_start;
	} else {
		ktep = &(ktp->kt_entries[new_index]);
		new_index++;
		if (new_index == nentries) {
			ktsp->ks_index = 0;
		} else {
			ktsp->ks_index = new_index;
		}
	}
	return ktep;
}