1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Loopback bridge driver for the Greybus loopback module.
4  *
5  * Copyright 2014 Google Inc.
6  * Copyright 2014 Linaro Ltd.
7  */
8 
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/slab.h>
15 #include <linux/kthread.h>
16 #include <linux/delay.h>
17 #include <linux/random.h>
18 #include <linux/sizes.h>
19 #include <linux/cdev.h>
20 #include <linux/fs.h>
21 #include <linux/kfifo.h>
22 #include <linux/debugfs.h>
23 #include <linux/list_sort.h>
24 #include <linux/spinlock.h>
25 #include <linux/workqueue.h>
26 #include <linux/atomic.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/greybus.h>
29 #include <asm/div64.h>
30 
31 #define NSEC_PER_DAY 86400000000000ULL
32 
33 struct gb_loopback_stats {
34 	u32 min;
35 	u32 max;
36 	u64 sum;
37 	u32 count;
38 };
39 
40 struct gb_loopback_device {
41 	struct dentry *root;
42 	u32 count;
43 	size_t size_max;
44 
45 	/* We need to take a lock in atomic context */
46 	spinlock_t lock;
47 	wait_queue_head_t wq;
48 };
49 
50 static struct gb_loopback_device gb_dev;
51 
52 struct gb_loopback_async_operation {
53 	struct gb_loopback *gb;
54 	struct gb_operation *operation;
55 	ktime_t ts;
56 	int (*completion)(struct gb_loopback_async_operation *op_async);
57 };
58 
59 struct gb_loopback {
60 	struct gb_connection *connection;
61 
62 	struct dentry *file;
63 	struct kfifo kfifo_lat;
64 	struct mutex mutex;
65 	struct task_struct *task;
66 	struct device *dev;
67 	wait_queue_head_t wq;
68 	wait_queue_head_t wq_completion;
69 	atomic_t outstanding_operations;
70 
71 	/* Per connection stats */
72 	ktime_t ts;
73 	struct gb_loopback_stats latency;
74 	struct gb_loopback_stats throughput;
75 	struct gb_loopback_stats requests_per_second;
76 	struct gb_loopback_stats apbridge_unipro_latency;
77 	struct gb_loopback_stats gbphy_firmware_latency;
78 
79 	int type;
80 	int async;
81 	int id;
82 	u32 size;
83 	u32 iteration_max;
84 	u32 iteration_count;
85 	int us_wait;
86 	u32 error;
87 	u32 requests_completed;
88 	u32 requests_timedout;
89 	u32 timeout;
90 	u32 jiffy_timeout;
91 	u32 timeout_min;
92 	u32 timeout_max;
93 	u32 outstanding_operations_max;
94 	u64 elapsed_nsecs;
95 	u32 apbridge_latency_ts;
96 	u32 gbphy_latency_ts;
97 
98 	u32 send_count;
99 };
100 
101 static struct class loopback_class = {
102 	.name		= "gb_loopback",
103 	.owner		= THIS_MODULE,
104 };
105 static DEFINE_IDA(loopback_ida);
106 
107 /* Min/max values in jiffies */
108 #define GB_LOOPBACK_TIMEOUT_MIN				1
109 #define GB_LOOPBACK_TIMEOUT_MAX				10000
110 
111 #define GB_LOOPBACK_FIFO_DEFAULT			8192
112 
113 static unsigned int kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
114 module_param(kfifo_depth, uint, 0444);
115 
116 /* Maximum size of any one send data buffer we support */
117 #define MAX_PACKET_SIZE (PAGE_SIZE * 2)
118 
119 #define GB_LOOPBACK_US_WAIT_MAX				1000000
120 
121 /* interface sysfs attributes */
122 #define gb_loopback_ro_attr(field)				\
123 static ssize_t field##_show(struct device *dev,			\
124 			    struct device_attribute *attr,		\
125 			    char *buf)					\
126 {									\
127 	struct gb_loopback *gb = dev_get_drvdata(dev);			\
128 	return sprintf(buf, "%u\n", gb->field);			\
129 }									\
130 static DEVICE_ATTR_RO(field)
131 
132 #define gb_loopback_ro_stats_attr(name, field, type)		\
133 static ssize_t name##_##field##_show(struct device *dev,	\
134 			    struct device_attribute *attr,		\
135 			    char *buf)					\
136 {									\
137 	struct gb_loopback *gb = dev_get_drvdata(dev);			\
138 	/* Report 0 for min and max if no transfer succeeded */		\
139 	if (!gb->requests_completed)					\
140 		return sprintf(buf, "0\n");				\
141 	return sprintf(buf, "%" #type "\n", gb->name.field);		\
142 }									\
143 static DEVICE_ATTR_RO(name##_##field)
144 
145 #define gb_loopback_ro_avg_attr(name)			\
146 static ssize_t name##_avg_show(struct device *dev,		\
147 			    struct device_attribute *attr,		\
148 			    char *buf)					\
149 {									\
150 	struct gb_loopback_stats *stats;				\
151 	struct gb_loopback *gb;						\
152 	u64 avg, rem;							\
153 	u32 count;							\
154 	gb = dev_get_drvdata(dev);			\
155 	stats = &gb->name;					\
156 	count = stats->count ? stats->count : 1;			\
157 	avg = stats->sum + count / 2000000; /* round closest */		\
158 	rem = do_div(avg, count);					\
159 	rem *= 1000000;							\
160 	do_div(rem, count);						\
161 	return sprintf(buf, "%llu.%06u\n", avg, (u32)rem);		\
162 }									\
163 static DEVICE_ATTR_RO(name##_avg)
164 
165 #define gb_loopback_stats_attrs(field)				\
166 	gb_loopback_ro_stats_attr(field, min, u);		\
167 	gb_loopback_ro_stats_attr(field, max, u);		\
168 	gb_loopback_ro_avg_attr(field)
169 
170 #define gb_loopback_attr(field, type)					\
171 static ssize_t field##_show(struct device *dev,				\
172 			    struct device_attribute *attr,		\
173 			    char *buf)					\
174 {									\
175 	struct gb_loopback *gb = dev_get_drvdata(dev);			\
176 	return sprintf(buf, "%" #type "\n", gb->field);			\
177 }									\
178 static ssize_t field##_store(struct device *dev,			\
179 			    struct device_attribute *attr,		\
180 			    const char *buf,				\
181 			    size_t len)					\
182 {									\
183 	int ret;							\
184 	struct gb_loopback *gb = dev_get_drvdata(dev);			\
185 	mutex_lock(&gb->mutex);						\
186 	ret = sscanf(buf, "%"#type, &gb->field);			\
187 	if (ret != 1)							\
188 		len = -EINVAL;						\
189 	else								\
190 		gb_loopback_check_attr(gb);				\
191 	mutex_unlock(&gb->mutex);					\
192 	return len;							\
193 }									\
194 static DEVICE_ATTR_RW(field)
195 
196 #define gb_dev_loopback_ro_attr(field, conn)				\
197 static ssize_t field##_show(struct device *dev,		\
198 			    struct device_attribute *attr,		\
199 			    char *buf)					\
200 {									\
201 	struct gb_loopback *gb = dev_get_drvdata(dev);			\
202 	return sprintf(buf, "%u\n", gb->field);				\
203 }									\
204 static DEVICE_ATTR_RO(field)
205 
206 #define gb_dev_loopback_rw_attr(field, type)				\
207 static ssize_t field##_show(struct device *dev,				\
208 			    struct device_attribute *attr,		\
209 			    char *buf)					\
210 {									\
211 	struct gb_loopback *gb = dev_get_drvdata(dev);			\
212 	return sprintf(buf, "%" #type "\n", gb->field);			\
213 }									\
214 static ssize_t field##_store(struct device *dev,			\
215 			    struct device_attribute *attr,		\
216 			    const char *buf,				\
217 			    size_t len)					\
218 {									\
219 	int ret;							\
220 	struct gb_loopback *gb = dev_get_drvdata(dev);			\
221 	mutex_lock(&gb->mutex);						\
222 	ret = sscanf(buf, "%"#type, &gb->field);			\
223 	if (ret != 1)							\
224 		len = -EINVAL;						\
225 	else								\
226 		gb_loopback_check_attr(gb);		\
227 	mutex_unlock(&gb->mutex);					\
228 	return len;							\
229 }									\
230 static DEVICE_ATTR_RW(field)
231 
232 static void gb_loopback_reset_stats(struct gb_loopback *gb);
233 static void gb_loopback_check_attr(struct gb_loopback *gb)
234 {
235 	if (gb->us_wait > GB_LOOPBACK_US_WAIT_MAX)
236 		gb->us_wait = GB_LOOPBACK_US_WAIT_MAX;
237 	if (gb->size > gb_dev.size_max)
238 		gb->size = gb_dev.size_max;
239 	gb->requests_timedout = 0;
240 	gb->requests_completed = 0;
241 	gb->iteration_count = 0;
242 	gb->send_count = 0;
243 	gb->error = 0;
244 
245 	if (kfifo_depth < gb->iteration_max) {
246 		dev_warn(gb->dev,
247 			 "cannot log bytes %u kfifo_depth %u\n",
248 			 gb->iteration_max, kfifo_depth);
249 	}
250 	kfifo_reset_out(&gb->kfifo_lat);
251 
252 	switch (gb->type) {
253 	case GB_LOOPBACK_TYPE_PING:
254 	case GB_LOOPBACK_TYPE_TRANSFER:
255 	case GB_LOOPBACK_TYPE_SINK:
256 		gb->jiffy_timeout = usecs_to_jiffies(gb->timeout);
257 		if (!gb->jiffy_timeout)
258 			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MIN;
259 		else if (gb->jiffy_timeout > GB_LOOPBACK_TIMEOUT_MAX)
260 			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MAX;
261 		gb_loopback_reset_stats(gb);
262 		wake_up(&gb->wq);
263 		break;
264 	default:
265 		gb->type = 0;
266 		break;
267 	}
268 }
269 
270 /* Time to send and receive one message */
271 gb_loopback_stats_attrs(latency);
272 /* Number of requests sent per second on this cport */
273 gb_loopback_stats_attrs(requests_per_second);
274 /* Quantity of data sent and received on this cport */
275 gb_loopback_stats_attrs(throughput);
276 /* Latency across the UniPro link from APBridge's perspective */
277 gb_loopback_stats_attrs(apbridge_unipro_latency);
278 /* Firmware induced overhead in the GPBridge */
279 gb_loopback_stats_attrs(gbphy_firmware_latency);
280 
281 /* Number of errors encountered during loop */
282 gb_loopback_ro_attr(error);
283 /* Number of requests successfully completed async */
284 gb_loopback_ro_attr(requests_completed);
285 /* Number of requests timed out async */
286 gb_loopback_ro_attr(requests_timedout);
287 /* Timeout minimum in useconds */
288 gb_loopback_ro_attr(timeout_min);
289 /* Timeout maximum in useconds */
290 gb_loopback_ro_attr(timeout_max);
291 
292 /*
293  * Type of loopback message to send based on protocol type definitions
294  * 0 => Don't send message
295  * 2 => Send ping message continuously (message without payload)
296  * 3 => Send transfer message continuously (message with payload,
297  *					   payload returned in response)
298  * 4 => Send a sink message (message with payload, no payload in response)
299  */
300 gb_dev_loopback_rw_attr(type, d);
301 /* Size of transfer message payload: 0-4096 bytes */
302 gb_dev_loopback_rw_attr(size, u);
303 /* Time to wait between two messages: 0-1000 ms */
304 gb_dev_loopback_rw_attr(us_wait, d);
305 /* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
306 gb_dev_loopback_rw_attr(iteration_max, u);
307 /* The current index of the for (i = 0; i < iteration_max; i++) loop */
308 gb_dev_loopback_ro_attr(iteration_count, false);
309 /* A flag to indicate synchronous or asynchronous operations */
310 gb_dev_loopback_rw_attr(async, u);
311 /* Timeout of an individual asynchronous request */
312 gb_dev_loopback_rw_attr(timeout, u);
313 /* Maximum number of in-flight operations before back-off */
314 gb_dev_loopback_rw_attr(outstanding_operations_max, u);
315 
316 static struct attribute *loopback_attrs[] = {
317 	&dev_attr_latency_min.attr,
318 	&dev_attr_latency_max.attr,
319 	&dev_attr_latency_avg.attr,
320 	&dev_attr_requests_per_second_min.attr,
321 	&dev_attr_requests_per_second_max.attr,
322 	&dev_attr_requests_per_second_avg.attr,
323 	&dev_attr_throughput_min.attr,
324 	&dev_attr_throughput_max.attr,
325 	&dev_attr_throughput_avg.attr,
326 	&dev_attr_apbridge_unipro_latency_min.attr,
327 	&dev_attr_apbridge_unipro_latency_max.attr,
328 	&dev_attr_apbridge_unipro_latency_avg.attr,
329 	&dev_attr_gbphy_firmware_latency_min.attr,
330 	&dev_attr_gbphy_firmware_latency_max.attr,
331 	&dev_attr_gbphy_firmware_latency_avg.attr,
332 	&dev_attr_type.attr,
333 	&dev_attr_size.attr,
334 	&dev_attr_us_wait.attr,
335 	&dev_attr_iteration_count.attr,
336 	&dev_attr_iteration_max.attr,
337 	&dev_attr_async.attr,
338 	&dev_attr_error.attr,
339 	&dev_attr_requests_completed.attr,
340 	&dev_attr_requests_timedout.attr,
341 	&dev_attr_timeout.attr,
342 	&dev_attr_outstanding_operations_max.attr,
343 	&dev_attr_timeout_min.attr,
344 	&dev_attr_timeout_max.attr,
345 	NULL,
346 };
347 ATTRIBUTE_GROUPS(loopback);
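/*
 * Illustrative use of the attributes above from user space (the device
 * name gb_loopback0 is only an example; the instance number is assigned
 * at probe time):
 *
 *   echo 128  > /sys/class/gb_loopback/gb_loopback0/size
 *   echo 1000 > /sys/class/gb_loopback/gb_loopback0/iteration_max
 *   echo 3    > /sys/class/gb_loopback/gb_loopback0/type
 *   cat /sys/class/gb_loopback/gb_loopback0/latency_avg
 *
 * Writing a non-zero type wakes the worker thread; type is cleared back
 * to 0 once iteration_max operations have completed.
 */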
348 
349 static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error);
350 
351 static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
352 {
353 	do_div(elapsed_nsecs, NSEC_PER_USEC);
354 	return elapsed_nsecs;
355 }
356 
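/*
 * Nanoseconds elapsed from t1 to t2; when t2 does not exceed t1 the
 * result is computed as NSEC_PER_DAY - t2 + t1 to cover a wrapped
 * timestamp.
 */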
357 static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
358 {
359 	if (t2 > t1)
360 		return t2 - t1;
361 	else
362 		return NSEC_PER_DAY - t2 + t1;
363 }
364 
365 static u64 gb_loopback_calc_latency(ktime_t ts, ktime_t te)
366 {
367 	return __gb_loopback_calc_latency(ktime_to_ns(ts), ktime_to_ns(te));
368 }
369 
370 static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
371 				      void *request, int request_size,
372 				      void *response, int response_size)
373 {
374 	struct gb_operation *operation;
375 	ktime_t ts, te;
376 	int ret;
377 
378 	ts = ktime_get();
379 	operation = gb_operation_create(gb->connection, type, request_size,
380 					response_size, GFP_KERNEL);
381 	if (!operation)
382 		return -ENOMEM;
383 
384 	if (request_size)
385 		memcpy(operation->request->payload, request, request_size);
386 
387 	ret = gb_operation_request_send_sync(operation);
388 	if (ret) {
389 		dev_err(&gb->connection->bundle->dev,
390 			"synchronous operation failed: %d\n", ret);
391 		goto out_put_operation;
392 	} else {
393 		if (response_size == operation->response->payload_size) {
394 			memcpy(response, operation->response->payload,
395 			       response_size);
396 		} else {
397 			dev_err(&gb->connection->bundle->dev,
398 				"response size %zu expected %d\n",
399 				operation->response->payload_size,
400 				response_size);
401 			ret = -EINVAL;
402 			goto out_put_operation;
403 		}
404 	}
405 
406 	te = ktime_get();
407 
408 	/* Calculate the total time the message took */
409 	gb->elapsed_nsecs = gb_loopback_calc_latency(ts, te);
410 
411 out_put_operation:
412 	gb_operation_put(operation);
413 
414 	return ret;
415 }
416 
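/* Block until every outstanding asynchronous operation has completed. */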
417 static void gb_loopback_async_wait_all(struct gb_loopback *gb)
418 {
419 	wait_event(gb->wq_completion,
420 		   !atomic_read(&gb->outstanding_operations));
421 }
422 
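/*
 * Completion handler for asynchronous operations: run the optional
 * type-specific completion check, fold the result into the per-connection
 * statistics, wake any waiters and drop the operation references.
 */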
423 static void gb_loopback_async_operation_callback(struct gb_operation *operation)
424 {
425 	struct gb_loopback_async_operation *op_async;
426 	struct gb_loopback *gb;
427 	ktime_t te;
428 	int result;
429 
430 	te = ktime_get();
431 	result = gb_operation_result(operation);
432 	op_async = gb_operation_get_data(operation);
433 	gb = op_async->gb;
434 
435 	mutex_lock(&gb->mutex);
436 
437 	if (!result && op_async->completion)
438 		result = op_async->completion(op_async);
439 
440 	if (!result) {
441 		gb->elapsed_nsecs = gb_loopback_calc_latency(op_async->ts, te);
442 	} else {
443 		gb->error++;
444 		if (result == -ETIMEDOUT)
445 			gb->requests_timedout++;
446 	}
447 
448 	gb->iteration_count++;
449 	gb_loopback_calculate_stats(gb, result);
450 
451 	mutex_unlock(&gb->mutex);
452 
453 	dev_dbg(&gb->connection->bundle->dev, "complete operation %d\n",
454 		operation->id);
455 
456 	/* Wake up waiters */
457 	atomic_dec(&op_async->gb->outstanding_operations);
458 	wake_up(&gb->wq_completion);
459 
460 	/* Release resources */
461 	gb_operation_put(operation);
462 	kfree(op_async);
463 }
464 
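/*
 * Build and send one asynchronous loopback operation.  On success the
 * callback above releases the operation and its tracking structure; on
 * failure they are freed here before the error is returned.
 */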
465 static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
466 				       void *request, int request_size,
467 				       int response_size,
468 				       void *completion)
469 {
470 	struct gb_loopback_async_operation *op_async;
471 	struct gb_operation *operation;
472 	int ret;
473 
474 	op_async = kzalloc(sizeof(*op_async), GFP_KERNEL);
475 	if (!op_async)
476 		return -ENOMEM;
477 
478 	operation = gb_operation_create(gb->connection, type, request_size,
479 					response_size, GFP_KERNEL);
480 	if (!operation) {
481 		kfree(op_async);
482 		return -ENOMEM;
483 	}
484 
485 	if (request_size)
486 		memcpy(operation->request->payload, request, request_size);
487 
488 	gb_operation_set_data(operation, op_async);
489 
490 	op_async->gb = gb;
491 	op_async->operation = operation;
492 	op_async->completion = completion;
493 
494 	op_async->ts = ktime_get();
495 
496 	atomic_inc(&gb->outstanding_operations);
497 	ret = gb_operation_request_send(operation,
498 					gb_loopback_async_operation_callback,
499 					jiffies_to_msecs(gb->jiffy_timeout),
500 					GFP_KERNEL);
501 	if (ret) {
502 		atomic_dec(&gb->outstanding_operations);
503 		gb_operation_put(operation);
504 		kfree(op_async);
505 	}
506 	return ret;
507 }
508 
509 static int gb_loopback_sync_sink(struct gb_loopback *gb, u32 len)
510 {
511 	struct gb_loopback_transfer_request *request;
512 	int retval;
513 
514 	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
515 	if (!request)
516 		return -ENOMEM;
517 
518 	request->len = cpu_to_le32(len);
519 	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
520 					    request, len + sizeof(*request),
521 					    NULL, 0);
522 	kfree(request);
523 	return retval;
524 }
525 
526 static int gb_loopback_sync_transfer(struct gb_loopback *gb, u32 len)
527 {
528 	struct gb_loopback_transfer_request *request;
529 	struct gb_loopback_transfer_response *response;
530 	int retval;
531 
532 	gb->apbridge_latency_ts = 0;
533 	gb->gbphy_latency_ts = 0;
534 
535 	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
536 	if (!request)
537 		return -ENOMEM;
538 	response = kmalloc(len + sizeof(*response), GFP_KERNEL);
539 	if (!response) {
540 		kfree(request);
541 		return -ENOMEM;
542 	}
543 
544 	memset(request->data, 0x5A, len);
545 
546 	request->len = cpu_to_le32(len);
547 	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
548 					    request, len + sizeof(*request),
549 					    response, len + sizeof(*response));
550 	if (retval)
551 		goto gb_error;
552 
553 	if (memcmp(request->data, response->data, len)) {
554 		dev_err(&gb->connection->bundle->dev,
555 			"Loopback Data doesn't match\n");
556 		retval = -EREMOTEIO;
557 	}
558 	gb->apbridge_latency_ts = (u32)__le32_to_cpu(response->reserved0);
559 	gb->gbphy_latency_ts = (u32)__le32_to_cpu(response->reserved1);
560 
561 gb_error:
562 	kfree(request);
563 	kfree(response);
564 
565 	return retval;
566 }
567 
568 static int gb_loopback_sync_ping(struct gb_loopback *gb)
569 {
570 	return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
571 					  NULL, 0, NULL, 0);
572 }
573 
574 static int gb_loopback_async_sink(struct gb_loopback *gb, u32 len)
575 {
576 	struct gb_loopback_transfer_request *request;
577 	int retval;
578 
579 	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
580 	if (!request)
581 		return -ENOMEM;
582 
583 	request->len = cpu_to_le32(len);
584 	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_SINK,
585 					     request, len + sizeof(*request),
586 					     0, NULL);
587 	kfree(request);
588 	return retval;
589 }
590 
591 static int gb_loopback_async_transfer_complete(
592 				struct gb_loopback_async_operation *op_async)
593 {
594 	struct gb_loopback *gb;
595 	struct gb_operation *operation;
596 	struct gb_loopback_transfer_request *request;
597 	struct gb_loopback_transfer_response *response;
598 	size_t len;
599 	int retval = 0;
600 
601 	gb = op_async->gb;
602 	operation = op_async->operation;
603 	request = operation->request->payload;
604 	response = operation->response->payload;
605 	len = le32_to_cpu(request->len);
606 
607 	if (memcmp(request->data, response->data, len)) {
608 		dev_err(&gb->connection->bundle->dev,
609 			"Loopback Data doesn't match operation id %d\n",
610 			operation->id);
611 		retval = -EREMOTEIO;
612 	} else {
613 		gb->apbridge_latency_ts =
614 			(u32)__le32_to_cpu(response->reserved0);
615 		gb->gbphy_latency_ts =
616 			(u32)__le32_to_cpu(response->reserved1);
617 	}
618 
619 	return retval;
620 }
621 
622 static int gb_loopback_async_transfer(struct gb_loopback *gb, u32 len)
623 {
624 	struct gb_loopback_transfer_request *request;
625 	int retval, response_len;
626 
627 	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
628 	if (!request)
629 		return -ENOMEM;
630 
631 	memset(request->data, 0x5A, len);
632 
633 	request->len = cpu_to_le32(len);
634 	response_len = sizeof(struct gb_loopback_transfer_response);
635 	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_TRANSFER,
636 					     request, len + sizeof(*request),
637 					     len + response_len,
638 					     gb_loopback_async_transfer_complete);
639 	if (retval)
640 		goto gb_error;
641 
642 gb_error:
643 	kfree(request);
644 	return retval;
645 }
646 
647 static int gb_loopback_async_ping(struct gb_loopback *gb)
648 {
649 	return gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_PING,
650 					   NULL, 0, 0, NULL);
651 }
652 
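/*
 * Handler for loopback requests arriving from the remote end: ping and
 * sink requests need no response payload, transfer requests get their
 * payload echoed back in the response.
 */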
653 static int gb_loopback_request_handler(struct gb_operation *operation)
654 {
655 	struct gb_connection *connection = operation->connection;
656 	struct gb_loopback_transfer_request *request;
657 	struct gb_loopback_transfer_response *response;
658 	struct device *dev = &connection->bundle->dev;
659 	size_t len;
660 
661 	/* By convention, the AP initiates the version operation */
662 	switch (operation->type) {
663 	case GB_LOOPBACK_TYPE_PING:
664 	case GB_LOOPBACK_TYPE_SINK:
665 		return 0;
666 	case GB_LOOPBACK_TYPE_TRANSFER:
667 		if (operation->request->payload_size < sizeof(*request)) {
668 			dev_err(dev, "transfer request too small (%zu < %zu)\n",
669 				operation->request->payload_size,
670 				sizeof(*request));
671 			return -EINVAL;	/* -EMSGSIZE */
672 		}
673 		request = operation->request->payload;
674 		len = le32_to_cpu(request->len);
675 		if (len > gb_dev.size_max) {
676 			dev_err(dev, "transfer request too large (%zu > %zu)\n",
677 				len, gb_dev.size_max);
678 			return -EINVAL;
679 		}
680 
681 		if (!gb_operation_response_alloc(operation,
682 				len + sizeof(*response), GFP_KERNEL)) {
683 			dev_err(dev, "error allocating response\n");
684 			return -ENOMEM;
685 		}
686 		response = operation->response->payload;
687 		response->len = cpu_to_le32(len);
688 		if (len)
689 			memcpy(response->data, request->data, len);
690 
691 		return 0;
692 	default:
693 		dev_err(dev, "unsupported request: %u\n", operation->type);
694 		return -EINVAL;
695 	}
696 }
697 
698 static void gb_loopback_reset_stats(struct gb_loopback *gb)
699 {
700 	struct gb_loopback_stats reset = {
701 		.min = U32_MAX,
702 	};
703 
704 	/* Reset per-connection stats */
705 	memcpy(&gb->latency, &reset,
706 	       sizeof(struct gb_loopback_stats));
707 	memcpy(&gb->throughput, &reset,
708 	       sizeof(struct gb_loopback_stats));
709 	memcpy(&gb->requests_per_second, &reset,
710 	       sizeof(struct gb_loopback_stats));
711 	memcpy(&gb->apbridge_unipro_latency, &reset,
712 	       sizeof(struct gb_loopback_stats));
713 	memcpy(&gb->gbphy_firmware_latency, &reset,
714 	       sizeof(struct gb_loopback_stats));
715 
716 	/* Should be initialized at least once per transaction set */
717 	gb->apbridge_latency_ts = 0;
718 	gb->gbphy_latency_ts = 0;
719 	gb->ts = ktime_set(0, 0);
720 }
721 
722 static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
723 {
724 	if (stats->min > val)
725 		stats->min = val;
726 	if (stats->max < val)
727 		stats->max = val;
728 	stats->sum += val;
729 	stats->count++;
730 }
731 
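/*
 * Fold a windowed sample into the stats: sum and count accumulate the raw
 * values while min and max track the per-window average (val / count).
 */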
732 static void gb_loopback_update_stats_window(struct gb_loopback_stats *stats,
733 					    u64 val, u32 count)
734 {
735 	stats->sum += val;
736 	stats->count += count;
737 
738 	do_div(val, count);
739 	if (stats->min > val)
740 		stats->min = val;
741 	if (stats->max < val)
742 		stats->max = val;
743 }
744 
745 static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
746 {
747 	u64 req = gb->requests_completed * USEC_PER_SEC;
748 
749 	gb_loopback_update_stats_window(&gb->requests_per_second, req, latency);
750 }
751 
752 static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
753 {
754 	u64 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;
755 
756 	switch (gb->type) {
757 	case GB_LOOPBACK_TYPE_PING:
758 		break;
759 	case GB_LOOPBACK_TYPE_SINK:
760 		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
761 				  gb->size;
762 		break;
763 	case GB_LOOPBACK_TYPE_TRANSFER:
764 		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
765 				  sizeof(struct gb_loopback_transfer_response) +
766 				  gb->size * 2;
767 		break;
768 	default:
769 		return;
770 	}
771 
772 	aggregate_size *= gb->requests_completed;
773 	aggregate_size *= USEC_PER_SEC;
774 	gb_loopback_update_stats_window(&gb->throughput, aggregate_size,
775 					latency);
776 }
777 
778 static void gb_loopback_calculate_latency_stats(struct gb_loopback *gb)
779 {
780 	u32 lat;
781 
782 	/* Express latency in terms of microseconds */
783 	lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);
784 
785 	/* Log latency statistic */
786 	gb_loopback_update_stats(&gb->latency, lat);
787 
788 	/* Raw latency log on a per thread basis */
789 	kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));
790 
791 	/* Log the firmware supplied latency values */
792 	gb_loopback_update_stats(&gb->apbridge_unipro_latency,
793 				 gb->apbridge_latency_ts);
794 	gb_loopback_update_stats(&gb->gbphy_firmware_latency,
795 				 gb->gbphy_latency_ts);
796 }
797 
798 static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error)
799 {
800 	u64 nlat;
801 	u32 lat;
802 	ktime_t te;
803 
804 	if (!error) {
805 		gb->requests_completed++;
806 		gb_loopback_calculate_latency_stats(gb);
807 	}
808 
809 	te = ktime_get();
810 	nlat = gb_loopback_calc_latency(gb->ts, te);
811 	if (nlat >= NSEC_PER_SEC || gb->iteration_count == gb->iteration_max) {
812 		lat = gb_loopback_nsec_to_usec_latency(nlat);
813 
814 		gb_loopback_throughput_update(gb, lat);
815 		gb_loopback_requests_update(gb, lat);
816 
817 		if (gb->iteration_count != gb->iteration_max) {
818 			gb->ts = te;
819 			gb->requests_completed = 0;
820 		}
821 	}
822 }
823 
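/*
 * When asynchronous mode is active and a limit on in-flight operations is
 * set, block the sender until the number of outstanding operations drops
 * below that limit (or the thread is asked to stop).
 */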
824 static void gb_loopback_async_wait_to_send(struct gb_loopback *gb)
825 {
826 	if (!(gb->async && gb->outstanding_operations_max))
827 		return;
828 	wait_event_interruptible(gb->wq_completion,
829 				 (atomic_read(&gb->outstanding_operations) <
830 				  gb->outstanding_operations_max) ||
831 				  kthread_should_stop());
832 }
833 
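/*
 * Per-connection worker thread: waits for a test type to be selected via
 * sysfs, then issues synchronous or asynchronous operations until
 * iteration_max is reached or the thread is stopped.
 */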
834 static int gb_loopback_fn(void *data)
835 {
836 	int error = 0;
837 	int us_wait = 0;
838 	int type;
839 	int ret;
840 	u32 size;
841 
842 	struct gb_loopback *gb = data;
843 	struct gb_bundle *bundle = gb->connection->bundle;
844 
845 	ret = gb_pm_runtime_get_sync(bundle);
846 	if (ret)
847 		return ret;
848 
849 	while (1) {
850 		if (!gb->type) {
851 			gb_pm_runtime_put_autosuspend(bundle);
852 			wait_event_interruptible(gb->wq, gb->type ||
853 						 kthread_should_stop());
854 			ret = gb_pm_runtime_get_sync(bundle);
855 			if (ret)
856 				return ret;
857 		}
858 
859 		if (kthread_should_stop())
860 			break;
861 
862 		/* Limit the maximum number of in-flight async operations */
863 		gb_loopback_async_wait_to_send(gb);
864 		if (kthread_should_stop())
865 			break;
866 
867 		mutex_lock(&gb->mutex);
868 
869 		/* Optionally terminate */
870 		if (gb->send_count == gb->iteration_max) {
871 			mutex_unlock(&gb->mutex);
872 
873 			/* Wait for synchronous and asynchronous completion */
874 			gb_loopback_async_wait_all(gb);
875 
876 			/* Mark complete unless user-space has poked us */
877 			mutex_lock(&gb->mutex);
878 			if (gb->iteration_count == gb->iteration_max) {
879 				gb->type = 0;
880 				gb->send_count = 0;
881 				sysfs_notify(&gb->dev->kobj,  NULL,
882 					     "iteration_count");
883 				dev_dbg(&bundle->dev, "load test complete\n");
884 			} else {
885 				dev_dbg(&bundle->dev,
886 					"continuing on with new test set\n");
887 			}
888 			mutex_unlock(&gb->mutex);
889 			continue;
890 		}
891 		size = gb->size;
892 		us_wait = gb->us_wait;
893 		type = gb->type;
894 		if (ktime_to_ns(gb->ts) == 0)
895 			gb->ts = ktime_get();
896 
897 		/* Else operations to perform */
898 		if (gb->async) {
899 			if (type == GB_LOOPBACK_TYPE_PING)
900 				error = gb_loopback_async_ping(gb);
901 			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
902 				error = gb_loopback_async_transfer(gb, size);
903 			else if (type == GB_LOOPBACK_TYPE_SINK)
904 				error = gb_loopback_async_sink(gb, size);
905 
906 			if (error) {
907 				gb->error++;
908 				gb->iteration_count++;
909 			}
910 		} else {
911 			/* We are effectively single threaded here */
912 			if (type == GB_LOOPBACK_TYPE_PING)
913 				error = gb_loopback_sync_ping(gb);
914 			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
915 				error = gb_loopback_sync_transfer(gb, size);
916 			else if (type == GB_LOOPBACK_TYPE_SINK)
917 				error = gb_loopback_sync_sink(gb, size);
918 
919 			if (error)
920 				gb->error++;
921 			gb->iteration_count++;
922 			gb_loopback_calculate_stats(gb, !!error);
923 		}
924 		gb->send_count++;
925 		mutex_unlock(&gb->mutex);
926 
927 		if (us_wait) {
928 			if (us_wait < 20000)
929 				usleep_range(us_wait, us_wait + 100);
930 			else
931 				msleep(us_wait / 1000);
932 		}
933 	}
934 
935 	gb_pm_runtime_put_autosuspend(bundle);
936 
937 	return 0;
938 }
939 
940 static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
941 						 struct kfifo *kfifo,
942 						 struct mutex *mutex)
943 {
944 	u32 latency;
945 	int retval;
946 
947 	if (kfifo_len(kfifo) == 0) {
948 		retval = -EAGAIN;
949 		goto done;
950 	}
951 
952 	mutex_lock(mutex);
953 	retval = kfifo_out(kfifo, &latency, sizeof(latency));
954 	if (retval > 0) {
955 		seq_printf(s, "%u", latency);
956 		retval = 0;
957 	}
958 	mutex_unlock(mutex);
959 done:
960 	return retval;
961 }
962 
963 static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
964 {
965 	struct gb_loopback *gb = s->private;
966 
967 	return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
968 						     &gb->mutex);
969 }
970 DEFINE_SHOW_ATTRIBUTE(gb_loopback_dbgfs_latency);
971 
972 #define DEBUGFS_NAMELEN 32
973 
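/*
 * Bind to a loopback bundle: create the connection, the per-connection
 * sysfs and debugfs entries, the latency kfifo and the worker thread.
 */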
974 static int gb_loopback_probe(struct gb_bundle *bundle,
975 			     const struct greybus_bundle_id *id)
976 {
977 	struct greybus_descriptor_cport *cport_desc;
978 	struct gb_connection *connection;
979 	struct gb_loopback *gb;
980 	struct device *dev;
981 	int retval;
982 	char name[DEBUGFS_NAMELEN];
983 	unsigned long flags;
984 
985 	if (bundle->num_cports != 1)
986 		return -ENODEV;
987 
988 	cport_desc = &bundle->cport_desc[0];
989 	if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOOPBACK)
990 		return -ENODEV;
991 
992 	gb = kzalloc(sizeof(*gb), GFP_KERNEL);
993 	if (!gb)
994 		return -ENOMEM;
995 
996 	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
997 					  gb_loopback_request_handler);
998 	if (IS_ERR(connection)) {
999 		retval = PTR_ERR(connection);
1000 		goto out_kzalloc;
1001 	}
1002 
1003 	gb->connection = connection;
1004 	greybus_set_drvdata(bundle, gb);
1005 
1006 	init_waitqueue_head(&gb->wq);
1007 	init_waitqueue_head(&gb->wq_completion);
1008 	atomic_set(&gb->outstanding_operations, 0);
1009 	gb_loopback_reset_stats(gb);
1010 
1011 	/* Reported values to user-space for min/max timeouts */
1012 	gb->timeout_min = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MIN);
1013 	gb->timeout_max = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MAX);
1014 
1015 	if (!gb_dev.count) {
1016 		/* Calculate maximum payload */
1017 		gb_dev.size_max = gb_operation_get_payload_size_max(connection);
1018 		if (gb_dev.size_max <=
1019 			sizeof(struct gb_loopback_transfer_request)) {
1020 			retval = -EINVAL;
1021 			goto out_connection_destroy;
1022 		}
1023 		gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
1024 	}
1025 
1026 	/* Create per-connection sysfs and debugfs data-points */
1027 	snprintf(name, sizeof(name), "raw_latency_%s",
1028 		 dev_name(&connection->bundle->dev));
1029 	gb->file = debugfs_create_file(name, S_IFREG | 0444, gb_dev.root, gb,
1030 				       &gb_loopback_dbgfs_latency_fops);
1031 
1032 	gb->id = ida_simple_get(&loopback_ida, 0, 0, GFP_KERNEL);
1033 	if (gb->id < 0) {
1034 		retval = gb->id;
1035 		goto out_debugfs_remove;
1036 	}
1037 
1038 	retval = gb_connection_enable(connection);
1039 	if (retval)
1040 		goto out_ida_remove;
1041 
1042 	dev = device_create_with_groups(&loopback_class,
1043 					&connection->bundle->dev,
1044 					MKDEV(0, 0), gb, loopback_groups,
1045 					"gb_loopback%d", gb->id);
1046 	if (IS_ERR(dev)) {
1047 		retval = PTR_ERR(dev);
1048 		goto out_connection_disable;
1049 	}
1050 	gb->dev = dev;
1051 
1052 	/* Allocate kfifo */
1053 	if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
1054 			GFP_KERNEL)) {
1055 		retval = -ENOMEM;
1056 		goto out_conn;
1057 	}
1058 	/* Fork worker thread */
1059 	mutex_init(&gb->mutex);
1060 	gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
1061 	if (IS_ERR(gb->task)) {
1062 		retval = PTR_ERR(gb->task);
1063 		goto out_kfifo;
1064 	}
1065 
1066 	spin_lock_irqsave(&gb_dev.lock, flags);
1067 	gb_dev.count++;
1068 	spin_unlock_irqrestore(&gb_dev.lock, flags);
1069 
1070 	gb_connection_latency_tag_enable(connection);
1071 
1072 	gb_pm_runtime_put_autosuspend(bundle);
1073 
1074 	return 0;
1075 
1076 out_kfifo:
1077 	kfifo_free(&gb->kfifo_lat);
1078 out_conn:
1079 	device_unregister(dev);
1080 out_connection_disable:
1081 	gb_connection_disable(connection);
1082 out_ida_remove:
1083 	ida_simple_remove(&loopback_ida, gb->id);
1084 out_debugfs_remove:
1085 	debugfs_remove(gb->file);
1086 out_connection_destroy:
1087 	gb_connection_destroy(connection);
1088 out_kzalloc:
1089 	kfree(gb);
1090 
1091 	return retval;
1092 }
1093 
1094 static void gb_loopback_disconnect(struct gb_bundle *bundle)
1095 {
1096 	struct gb_loopback *gb = greybus_get_drvdata(bundle);
1097 	unsigned long flags;
1098 	int ret;
1099 
1100 	ret = gb_pm_runtime_get_sync(bundle);
1101 	if (ret)
1102 		gb_pm_runtime_get_noresume(bundle);
1103 
1104 	gb_connection_disable(gb->connection);
1105 
1106 	if (!IS_ERR_OR_NULL(gb->task))
1107 		kthread_stop(gb->task);
1108 
1109 	kfifo_free(&gb->kfifo_lat);
1110 	gb_connection_latency_tag_disable(gb->connection);
1111 	debugfs_remove(gb->file);
1112 
1113 	/*
1114 	 * FIXME: gb_loopback_async_wait_all() is redundant now, as connection
1115 	 * is disabled at the beginning and so we can't have any more
1116 	 * incoming/outgoing requests.
1117 	 */
1118 	gb_loopback_async_wait_all(gb);
1119 
1120 	spin_lock_irqsave(&gb_dev.lock, flags);
1121 	gb_dev.count--;
1122 	spin_unlock_irqrestore(&gb_dev.lock, flags);
1123 
1124 	device_unregister(gb->dev);
1125 	ida_simple_remove(&loopback_ida, gb->id);
1126 
1127 	gb_connection_destroy(gb->connection);
1128 	kfree(gb);
1129 }
1130 
1131 static const struct greybus_bundle_id gb_loopback_id_table[] = {
1132 	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOOPBACK) },
1133 	{ }
1134 };
1135 MODULE_DEVICE_TABLE(greybus, gb_loopback_id_table);
1136 
1137 static struct greybus_driver gb_loopback_driver = {
1138 	.name		= "loopback",
1139 	.probe		= gb_loopback_probe,
1140 	.disconnect	= gb_loopback_disconnect,
1141 	.id_table	= gb_loopback_id_table,
1142 };
1143 
1144 static int loopback_init(void)
1145 {
1146 	int retval;
1147 
1148 	spin_lock_init(&gb_dev.lock);
1149 	gb_dev.root = debugfs_create_dir("gb_loopback", NULL);
1150 
1151 	retval = class_register(&loopback_class);
1152 	if (retval)
1153 		goto err;
1154 
1155 	retval = greybus_register(&gb_loopback_driver);
1156 	if (retval)
1157 		goto err_unregister;
1158 
1159 	return 0;
1160 
1161 err_unregister:
1162 	class_unregister(&loopback_class);
1163 err:
1164 	debugfs_remove_recursive(gb_dev.root);
1165 	return retval;
1166 }
1167 module_init(loopback_init);
1168 
1169 static void __exit loopback_exit(void)
1170 {
1171 	debugfs_remove_recursive(gb_dev.root);
1172 	greybus_deregister(&gb_loopback_driver);
1173 	class_unregister(&loopback_class);
1174 	ida_destroy(&loopback_ida);
1175 }
1176 module_exit(loopback_exit);
1177 
1178 MODULE_LICENSE("GPL v2");
1179