/*
 * Renesas USB driver
 *
 * Copyright (C) 2011 Renesas Solutions Corp.
 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/scatterlist.h>
#include "./common.h"
#include "./pipe.h"

#define usbhsf_get_cfifo(p)	(&((p)->fifo_info.cfifo))
#define usbhsf_get_d0fifo(p)	(&((p)->fifo_info.d0fifo))
#define usbhsf_get_d1fifo(p)	(&((p)->fifo_info.d1fifo))
#define usbhsf_is_cfifo(p, f)	(usbhsf_get_cfifo(p) == f)

#define usbhsf_fifo_is_busy(f)	((f)->pipe) /* see usbhs_pipe_select_fifo */

/*
 *		packet initialize
 */
void usbhs_pkt_init(struct usbhs_pkt *pkt)
{
	pkt->dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD(&pkt->node);
}

/*
 *		packet control function
 */
static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
	struct device *dev = usbhs_priv_to_dev(priv);

	dev_err(dev, "null handler\n");

	return -EINVAL;
}

static struct usbhs_pkt_handle usbhsf_null_handler = {
	.prepare = usbhsf_null_handle,
	.try_run = usbhsf_null_handle,
};

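/*
 * Queue a packet on a pipe. The caller must provide a completion
 * callback; the packet takes a copy of the pipe's current handler so
 * that it can later be switched (e.g. from DMA to PIO) per packet.
 */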
void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
		    void (*done)(struct usbhs_priv *priv,
				 struct usbhs_pkt *pkt),
		    void *buf, int len, int zero, int sequence)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	unsigned long flags;

	if (!done) {
		dev_err(dev, "no done function\n");
		return;
	}

	/********************  spin lock ********************/
	usbhs_lock(priv, flags);

	if (!pipe->handler) {
		dev_err(dev, "no handler function\n");
		pipe->handler = &usbhsf_null_handler;
	}

	list_move_tail(&pkt->node, &pipe->list);

	/*
	 * each pkt must hold its own handler,
	 * because the handler might change depending on the situation:
	 * dma handler -> pio handler.
	 */
	pkt->pipe	= pipe;
	pkt->buf	= buf;
	pkt->handler	= pipe->handler;
	pkt->length	= len;
	pkt->zero	= zero;
	pkt->actual	= 0;
	pkt->done	= done;
	pkt->sequence	= sequence;

	usbhs_unlock(priv, flags);
	/********************  spin unlock ******************/
}

static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
{
	list_del_init(&pkt->node);
}

static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
{
	if (list_empty(&pipe->list))
		return NULL;

	return list_first_entry(&pipe->list, struct usbhs_pkt, node);
}

struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	unsigned long flags;

	/********************  spin lock ********************/
	usbhs_lock(priv, flags);

	if (!pkt)
		pkt = __usbhsf_pkt_get(pipe);

	if (pkt)
		__usbhsf_pkt_del(pkt);

	usbhs_unlock(priv, flags);
	/********************  spin unlock ******************/

	return pkt;
}

enum {
	USBHSF_PKT_PREPARE,
	USBHSF_PKT_TRY_RUN,
	USBHSF_PKT_DMA_DONE,
};

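/*
 * Run the handler callback (prepare/try_run/dma_done) of the first
 * queued packet on this pipe. The packet list is manipulated under the
 * driver lock; the done() callback and the queue restart run after the
 * lock has been dropped.
 */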
static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_pkt *pkt;
	struct device *dev = usbhs_priv_to_dev(priv);
	int (*func)(struct usbhs_pkt *pkt, int *is_done);
	unsigned long flags;
	int ret = 0;
	int is_done = 0;

	/********************  spin lock ********************/
	usbhs_lock(priv, flags);

	pkt = __usbhsf_pkt_get(pipe);
	if (!pkt)
		goto __usbhs_pkt_handler_end;

	switch (type) {
	case USBHSF_PKT_PREPARE:
		func = pkt->handler->prepare;
		break;
	case USBHSF_PKT_TRY_RUN:
		func = pkt->handler->try_run;
		break;
	case USBHSF_PKT_DMA_DONE:
		func = pkt->handler->dma_done;
		break;
	default:
		dev_err(dev, "unknown pkt handler\n");
		goto __usbhs_pkt_handler_end;
	}

	ret = func(pkt, &is_done);

	if (is_done)
		__usbhsf_pkt_del(pkt);

__usbhs_pkt_handler_end:
	usbhs_unlock(priv, flags);
	/********************  spin unlock ******************/

	if (is_done) {
		pkt->done(priv, pkt);
		usbhs_pkt_start(pipe);
	}

	return ret;
}

void usbhs_pkt_start(struct usbhs_pipe *pipe)
{
	usbhsf_pkt_handler(pipe, USBHSF_PKT_PREPARE);
}

/*
 *		irq enable/disable function
 */
#define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, bempsts, e)
#define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, brdysts, e)
#define usbhsf_irq_callback_ctrl(pipe, status, enable)			\
	({								\
		struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);	\
		struct usbhs_mod *mod = usbhs_mod_get_current(priv);	\
		u16 status = (1 << usbhs_pipe_number(pipe));		\
		if (!mod)						\
			return;						\
		if (enable)						\
			mod->irq_##status |= status;			\
		else							\
			mod->irq_##status &= ~status;			\
		usbhs_irq_callback_update(priv, mod);			\
	})

static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
{
	/*
	 * The DCP pipe can NOT use the "ready" interrupt for "send";
	 * it should use the "empty" interrupt.
	 * see
	 *   "Operation" - "Interrupt Function" - "BRDY Interrupt"
	 *
	 * On the other hand, a normal pipe can use the "ready" interrupt
	 * for "send" even though it is single/double buffered.
	 */
	if (usbhs_pipe_is_dcp(pipe))
		usbhsf_irq_empty_ctrl(pipe, enable);
	else
		usbhsf_irq_ready_ctrl(pipe, enable);
}

static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
{
	usbhsf_irq_ready_ctrl(pipe, enable);
}

/*
 *		FIFO ctrl
 */
static void usbhsf_send_terminator(struct usbhs_pipe *pipe,
				   struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_bset(priv, fifo->ctr, BVAL, BVAL);
}

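/*
 * Wait for the FIFO port to become ready (FRDY). Polls in 10us steps
 * for up to ~10ms before giving up with -EBUSY.
 */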
static int usbhsf_fifo_barrier(struct usbhs_priv *priv,
			       struct usbhs_fifo *fifo)
{
	int timeout = 1024;

	do {
		/* The FIFO port is accessible */
		if (usbhs_read(priv, fifo->ctr) & FRDY)
			return 0;

		udelay(10);
	} while (timeout--);

	return -EBUSY;
}

static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	if (!usbhs_pipe_is_dcp(pipe))
		usbhsf_fifo_barrier(priv, fifo);

	usbhs_write(priv, fifo->ctr, BCLR);
}

static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
			       struct usbhs_fifo *fifo)
{
	return usbhs_read(priv, fifo->ctr) & DTLN_MASK;
}

static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
				 struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_pipe_select_fifo(pipe, NULL);
	usbhs_write(priv, fifo->sel, 0);
}

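/*
 * Attach a FIFO port to a pipe: program CURPIPE (and ISEL for the DCP)
 * in the FIFOSEL register and poll until the hardware reflects the new
 * selection.
 */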
static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo,
			      int write)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	int timeout = 1024;
	u16 mask = ((1 << 5) | 0xF);		/* mask of ISEL | CURPIPE */
	u16 base = usbhs_pipe_number(pipe);	/* CURPIPE */

	if (usbhs_pipe_is_busy(pipe) ||
	    usbhsf_fifo_is_busy(fifo))
		return -EBUSY;

	if (usbhs_pipe_is_dcp(pipe)) {
		base |= (1 == write) << 5;	/* ISEL */

		if (usbhs_mod_is_host(priv))
			usbhs_dcp_dir_for_host(pipe, write);
	}

	/* "base" will be used below */
	if (usbhs_get_dparam(priv, has_sudmac) && !usbhsf_is_cfifo(priv, fifo))
		usbhs_write(priv, fifo->sel, base);
	else
		usbhs_write(priv, fifo->sel, base | MBW_32);

	/* check ISEL and CURPIPE value */
	while (timeout--) {
		if (base == (mask & usbhs_read(priv, fifo->sel))) {
			usbhs_pipe_select_fifo(pipe, fifo);
			return 0;
		}
		udelay(10);
	}

	dev_err(dev, "fifo select error\n");

	return -EIO;
}

/*
 *		DCP status stage
 */
static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	struct device *dev = usbhs_priv_to_dev(priv);
	int ret;

	usbhs_pipe_disable(pipe);

	ret = usbhsf_fifo_select(pipe, fifo, 1);
	if (ret < 0) {
		dev_err(dev, "%s() failed\n", __func__);
		return ret;
	}

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */

	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_send_terminator(pipe, fifo);

	usbhsf_fifo_unselect(pipe, fifo);

	usbhsf_tx_irq_ctrl(pipe, 1);
	usbhs_pipe_enable(pipe);

	return ret;
}

static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	struct device *dev = usbhs_priv_to_dev(priv);
	int ret;

	usbhs_pipe_disable(pipe);

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0) {
		dev_err(dev, "%s() failed\n", __func__);
		return ret;
	}

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
	usbhsf_fifo_clear(pipe, fifo);

	usbhsf_fifo_unselect(pipe, fifo);

	usbhsf_rx_irq_ctrl(pipe, 1);
	usbhs_pipe_enable(pipe);

	return ret;
}

static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;

	if (pkt->handler == &usbhs_dcp_status_stage_in_handler)
		usbhsf_tx_irq_ctrl(pipe, 0);
	else
		usbhsf_rx_irq_ctrl(pipe, 0);

	pkt->actual = pkt->length;
	*is_done = 1;

	return 0;
}

struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
	.prepare = usbhs_dcp_dir_switch_to_write,
	.try_run = usbhs_dcp_dir_switch_done,
};

struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
	.prepare = usbhs_dcp_dir_switch_to_read,
	.try_run = usbhs_dcp_dir_switch_done,
};

/*
 *		DCP data stage (push)
 */
static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */

	/*
	 * change handler to PIO push
	 */
	pkt->handler = &usbhs_fifo_pio_push_handler;

	return pkt->handler->prepare(pkt, is_done);
}

struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
	.prepare = usbhsf_dcp_data_stage_try_push,
};

/*
 *		DCP data stage (pop)
 */
static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
					     int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/*
	 * prepare pop for DCP should
	 *  - change DCP direction,
	 *  - clear fifo
	 *  - DATA1
	 */
	usbhs_pipe_disable(pipe);

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */

	usbhsf_fifo_select(pipe, fifo, 0);
	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_fifo_unselect(pipe, fifo);

	/*
	 * change handler to PIO pop
	 */
	pkt->handler = &usbhs_fifo_pio_pop_handler;

	return pkt->handler->prepare(pkt, is_done);
}

struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
	.prepare = usbhsf_dcp_data_stage_prepare_pop,
};

/*
 *		PIO push handler
 */
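
/*
 * Copy up to one max-packet of data from the packet buffer into the
 * CFIFO. Aligned data is written 32 bits at a time; the remaining
 * bytes are written individually.
 */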
static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	void __iomem *addr = priv->base + fifo->port;
	u8 *buf;
	int maxp = usbhs_pipe_get_maxpacket(pipe);
	int total_len;
	int i, ret, len;
	int is_short;

	usbhs_pipe_data_sequence(pipe, pkt->sequence);
	pkt->sequence = -1; /* -1 sequence will be ignored */

	ret = usbhsf_fifo_select(pipe, fifo, 1);
	if (ret < 0)
		return 0;

	ret = usbhs_pipe_is_accessible(pipe);
	if (ret < 0) {
		/* inaccessible pipe is not an error */
		ret = 0;
		goto usbhs_fifo_write_busy;
	}

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhs_fifo_write_busy;

	buf		= pkt->buf    + pkt->actual;
	len		= pkt->length - pkt->actual;
	len		= min(len, maxp);
	total_len	= len;
	is_short	= total_len < maxp;

	/*
	 * FIXME
	 *
	 * 32-bit access only
	 */
	if (len >= 4 && !((unsigned long)buf & 0x03)) {
		iowrite32_rep(addr, buf, len / 4);
		len %= 4;
		buf += total_len - len;
	}

	/* the rest operation */
	for (i = 0; i < len; i++)
		iowrite8(buf[i], addr + (0x03 - (i & 0x03)));

	/*
	 * variable update
	 */
	pkt->actual += total_len;

	if (pkt->actual < pkt->length)
		*is_done = 0;		/* there is remaining data */
	else if (is_short)
		*is_done = 1;		/* short packet */
	else
		*is_done = !pkt->zero;	/* send zero packet? */

	/*
	 * pipe/irq handling
	 */
	if (is_short)
		usbhsf_send_terminator(pipe, fifo);

	usbhsf_tx_irq_ctrl(pipe, !*is_done);
	usbhs_pipe_enable(pipe);

	dev_dbg(dev, "  send %d (%d/ %d/ %d/ %d)\n",
		usbhs_pipe_number(pipe),
		pkt->length, pkt->actual, *is_done, pkt->zero);

	/*
	 * Transmission end
	 */
	if (*is_done) {
		if (usbhs_pipe_is_dcp(pipe))
			usbhs_dcp_control_transfer_done(pipe);
	}

	usbhsf_fifo_unselect(pipe, fifo);

	return 0;

usbhs_fifo_write_busy:
	usbhsf_fifo_unselect(pipe, fifo);

	/*
	 * pipe is busy.
	 * retry in interrupt
	 */
	usbhsf_tx_irq_ctrl(pipe, 1);

	return ret;
}

struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
	.prepare = usbhsf_pio_try_push,
	.try_run = usbhsf_pio_try_push,
};

/*
 *		PIO pop handler
 */
static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/*
	 * enable the pipe to prepare for packet reception
	 */
	usbhs_pipe_data_sequence(pipe, pkt->sequence);
	pkt->sequence = -1; /* -1 sequence will be ignored */

	usbhs_pipe_enable(pipe);
	usbhsf_rx_irq_ctrl(pipe, 1);

	return 0;
}

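/*
 * Drain the data currently held in the CFIFO into the packet buffer.
 * A short or zero-length packet, or reaching the requested length,
 * completes the packet and disables the pipe.
 */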
static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	void __iomem *addr = priv->base + fifo->port;
	u8 *buf;
	u32 data = 0;
	int maxp = usbhs_pipe_get_maxpacket(pipe);
	int rcv_len, len;
	int i, ret;
	int total_len = 0;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		return 0;

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhs_fifo_read_busy;

	rcv_len = usbhsf_fifo_rcv_len(priv, fifo);

	buf		= pkt->buf    + pkt->actual;
	len		= pkt->length - pkt->actual;
	len		= min(len, rcv_len);
	total_len	= len;

	/*
	 * update the actual length first here, to decide whether to
	 * disable the pipe.
	 * if this pipe keeps BUF status and all data were popped,
	 * then the next interrupt/token will be issued again
	 */
	pkt->actual += total_len;

	if ((pkt->actual == pkt->length) ||	/* receive all data */
	    (total_len < maxp)) {		/* short packet */
		*is_done = 1;
		usbhsf_rx_irq_ctrl(pipe, 0);
		usbhs_pipe_disable(pipe);	/* disable pipe first */
	}

	/*
	 * Buffer clear if Zero-Length packet
	 *
	 * see
	 * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
	 */
	if (0 == rcv_len) {
		pkt->zero = 1;
		usbhsf_fifo_clear(pipe, fifo);
		goto usbhs_fifo_read_end;
	}

	/*
	 * FIXME
	 *
	 * 32-bit access only
	 */
	if (len >= 4 && !((unsigned long)buf & 0x03)) {
		ioread32_rep(addr, buf, len / 4);
		len %= 4;
		buf += total_len - len;
	}

	/* the rest operation */
	for (i = 0; i < len; i++) {
		if (!(i & 0x03))
			data = ioread32(addr);

		buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
	}

usbhs_fifo_read_end:
	dev_dbg(dev, "  recv %d (%d/ %d/ %d/ %d)\n",
		usbhs_pipe_number(pipe),
		pkt->length, pkt->actual, *is_done, pkt->zero);

usbhs_fifo_read_busy:
	usbhsf_fifo_unselect(pipe, fifo);

	return ret;
}

struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
	.prepare = usbhsf_prepare_pop,
	.try_run = usbhsf_pio_try_pop,
};

/*
 *		DCP control stage handler
 */
static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
{
	usbhs_dcp_control_transfer_done(pkt->pipe);

	*is_done = 1;

	return 0;
}

struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
	.prepare = usbhsf_ctrl_stage_end,
	.try_run = usbhsf_ctrl_stage_end,
};

/*
 *		DMA fifo functions
 */
static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
					    struct usbhs_pkt *pkt)
{
	if (&usbhs_fifo_dma_push_handler == pkt->handler)
		return fifo->tx_chan;

	if (&usbhs_fifo_dma_pop_handler == pkt->handler)
		return fifo->rx_chan;

	return NULL;
}

static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv,
					      struct usbhs_pkt *pkt)
{
	struct usbhs_fifo *fifo;

	/* DMA :: D0FIFO */
	fifo = usbhsf_get_d0fifo(priv);
	if (usbhsf_dma_chan_get(fifo, pkt) &&
	    !usbhsf_fifo_is_busy(fifo))
		return fifo;

	/* DMA :: D1FIFO */
	fifo = usbhsf_get_d1fifo(priv);
	if (usbhsf_dma_chan_get(fifo, pkt) &&
	    !usbhsf_fifo_is_busy(fifo))
		return fifo;

	return NULL;
}

#define usbhsf_dma_start(p, f)	__usbhsf_dma_ctrl(p, f, DREQE)
#define usbhsf_dma_stop(p, f)	__usbhsf_dma_ctrl(p, f, 0)
static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo,
			      u16 dreqe)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_bset(priv, fifo->sel, DREQE, dreqe);
}

#define usbhsf_dma_map(p)	__usbhsf_dma_map_ctrl(p, 1)
#define usbhsf_dma_unmap(p)	__usbhsf_dma_map_ctrl(p, 0)
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);

	return info->dma_map_ctrl(pkt, map);
}

static void usbhsf_dma_complete(void *arg);
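
/*
 * Workqueue handler: build a one-entry scatterlist for the current
 * packet, submit it to the dmaengine channel and raise DREQE so the
 * FIFO starts issuing DMA requests.
 */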
static void xfer_work(struct work_struct *work)
{
	struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct scatterlist sg;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
	struct device *dev = usbhs_priv_to_dev(priv);
	enum dma_transfer_direction dir;
	dma_cookie_t cookie;

	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, virt_to_page(pkt->dma),
		    pkt->length, offset_in_page(pkt->dma));
	sg_dma_address(&sg) = pkt->dma + pkt->actual;
	sg_dma_len(&sg) = pkt->trans;

	desc = dmaengine_prep_slave_sg(chan, &sg, 1, dir,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return;

	desc->callback		= usbhsf_dma_complete;
	desc->callback_param	= pipe;

	cookie = desc->tx_submit(desc);
	if (cookie < 0) {
		dev_err(dev, "Failed to submit dma descriptor\n");
		return;
	}

	dev_dbg(dev, "  %s %d (%d/ %d)\n",
		fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);

	usbhsf_dma_start(pipe, fifo);
	dma_async_issue_pending(chan);
}

/*
 *		DMA push handler
 */
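
/*
 * Use DMA only for non-DCP pipes when the remaining length is at least
 * pio_dma_border, a multiple of 4 bytes and the buffer is 8-byte
 * aligned; everything else falls back to the PIO push handler.
 */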
static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo;
	int len = pkt->length - pkt->actual;
	int ret;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/* use PIO if packet is less than pio_dma_border or pipe is DCP */
	if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
	    usbhs_pipe_is_dcp(pipe))
		goto usbhsf_pio_prepare_push;

	if (len % 4) /* 32bit alignment */
		goto usbhsf_pio_prepare_push;

	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
		goto usbhsf_pio_prepare_push;

	/* get an enabled DMA fifo */
	fifo = usbhsf_get_dma_fifo(priv, pkt);
	if (!fifo)
		goto usbhsf_pio_prepare_push;

	if (usbhsf_dma_map(pkt) < 0)
		goto usbhsf_pio_prepare_push;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		goto usbhsf_pio_prepare_push_unmap;

	pkt->trans = len;

	INIT_WORK(&pkt->work, xfer_work);
	schedule_work(&pkt->work);

	return 0;

usbhsf_pio_prepare_push_unmap:
	usbhsf_dma_unmap(pkt);
usbhsf_pio_prepare_push:
	/*
	 * change handler to PIO
	 */
	pkt->handler = &usbhs_fifo_pio_push_handler;

	return pkt->handler->prepare(pkt, is_done);
}

static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;

	pkt->actual = pkt->trans;

	*is_done = !pkt->zero;	/* send zero packet? */

	usbhsf_dma_stop(pipe, pipe->fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	return 0;
}

struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
	.prepare	= usbhsf_dma_prepare_push,
	.dma_done	= usbhsf_dma_push_done,
};

/*
 *		DMA pop handler
 */
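
/*
 * Switch to DMA reception when the data pending in the FIFO is large,
 * a multiple of 4 bytes and the buffer is 8-byte aligned; otherwise
 * hand the packet back to the PIO pop handler.
 */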
static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo;
	int len, ret;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	if (usbhs_pipe_is_dcp(pipe))
		goto usbhsf_pio_prepare_pop;

	/* get an enabled DMA fifo */
	fifo = usbhsf_get_dma_fifo(priv, pkt);
	if (!fifo)
		goto usbhsf_pio_prepare_pop;

	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
		goto usbhsf_pio_prepare_pop;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		goto usbhsf_pio_prepare_pop;

	/* use PIO if packet is less than pio_dma_border */
	len = usbhsf_fifo_rcv_len(priv, fifo);
	len = min(pkt->length - pkt->actual, len);
	if (len % 4) /* 32bit alignment */
		goto usbhsf_pio_prepare_pop_unselect;

	if (len < usbhs_get_dparam(priv, pio_dma_border))
		goto usbhsf_pio_prepare_pop_unselect;

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhsf_pio_prepare_pop_unselect;

	if (usbhsf_dma_map(pkt) < 0)
		goto usbhsf_pio_prepare_pop_unselect;

	/* DMA */

	/*
	 * usbhs_fifo_dma_pop_handler :: prepare
	 * enabled the irq that brought us here,
	 * but it is no longer needed for DMA. disable it.
	 */
	usbhsf_rx_irq_ctrl(pipe, 0);

	pkt->trans = len;

	INIT_WORK(&pkt->work, xfer_work);
	schedule_work(&pkt->work);

	return 0;

usbhsf_pio_prepare_pop_unselect:
	usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_pop:

	/*
	 * change handler to PIO
	 */
	pkt->handler = &usbhs_fifo_pio_pop_handler;

	return pkt->handler->try_run(pkt, is_done);
}

static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	int maxp = usbhs_pipe_get_maxpacket(pipe);

	usbhsf_dma_stop(pipe, pipe->fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	pkt->actual += pkt->trans;

	if ((pkt->actual == pkt->length) ||	/* receive all data */
	    (pkt->trans < maxp)) {		/* short packet */
		*is_done = 1;
	} else {
		/* re-enable */
		usbhsf_prepare_pop(pkt, is_done);
	}

	return 0;
}

struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
	.prepare	= usbhsf_prepare_pop,
	.try_run	= usbhsf_dma_try_pop,
	.dma_done	= usbhsf_dma_pop_done
};

/*
 *		DMA setting
 */
static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
{
	struct sh_dmae_slave *slave = param;

	/*
	 * FIXME
	 *
	 * usbhs doesn't recognize id = 0 as valid DMA
	 */
	if (0 == slave->slave_id)
		return false;

	chan->private = slave;

	return true;
}

static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo)
{
	if (fifo->tx_chan)
		dma_release_channel(fifo->tx_chan);
	if (fifo->rx_chan)
		dma_release_channel(fifo->rx_chan);

	fifo->tx_chan = NULL;
	fifo->rx_chan = NULL;
}

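/*
 * Request one TX and one RX DMA channel for this FIFO. Getting no
 * channel is not fatal: transfers simply fall back to PIO.
 */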
static void usbhsf_dma_init(struct usbhs_priv *priv,
			    struct usbhs_fifo *fifo)
{
	struct device *dev = usbhs_priv_to_dev(priv);
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	fifo->tx_chan = dma_request_channel(mask, usbhsf_dma_filter,
					    &fifo->tx_slave);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter,
					    &fifo->rx_slave);

	if (fifo->tx_chan || fifo->rx_chan)
		dev_dbg(dev, "enable DMAEngine (%s%s%s)\n",
			 fifo->name,
			 fifo->tx_chan ? "[TX]" : "    ",
			 fifo->rx_chan ? "[RX]" : "    ");
}

/*
 *		irq functions
 */
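
/*
 * BEMP interrupt: a TX FIFO became empty. Find the interrupted pipes
 * and let their packet handlers push the next chunk.
 */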
static int usbhsf_irq_empty(struct usbhs_priv *priv,
			    struct usbhs_irq_state *irq_state)
{
	struct usbhs_pipe *pipe;
	struct device *dev = usbhs_priv_to_dev(priv);
	int i, ret;

	if (!irq_state->bempsts) {
		dev_err(dev, "debug %s !!\n", __func__);
		return -EIO;
	}

	dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);

	/*
	 * search interrupted "pipe"
	 * not "uep".
	 */
	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
		if (!(irq_state->bempsts & (1 << i)))
			continue;

		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
		if (ret < 0)
			dev_err(dev, "irq_empty run_error %d : %d\n", i, ret);
	}

	return 0;
}

static int usbhsf_irq_ready(struct usbhs_priv *priv,
			    struct usbhs_irq_state *irq_state)
{
	struct usbhs_pipe *pipe;
	struct device *dev = usbhs_priv_to_dev(priv);
	int i, ret;

	if (!irq_state->brdysts) {
		dev_err(dev, "debug %s !!\n", __func__);
		return -EIO;
	}

	dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);

	/*
	 * search interrupted "pipe"
	 * not "uep".
	 */
	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
		if (!(irq_state->brdysts & (1 << i)))
			continue;

		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
		if (ret < 0)
			dev_err(dev, "irq_ready run_error %d : %d\n", i, ret);
	}

	return 0;
}

static void usbhsf_dma_complete(void *arg)
{
	struct usbhs_pipe *pipe = arg;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	int ret;

	ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
	if (ret < 0)
		dev_err(dev, "dma_complete run_error %d : %d\n",
			usbhs_pipe_number(pipe), ret);
}

/*
 *		fifo init
 */
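
/*
 * Install the BEMP/BRDY interrupt callbacks, reset the per-FIFO state
 * and request DMA channels for the D0/D1 FIFOs.
 */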
void usbhs_fifo_init(struct usbhs_priv *priv)
{
	struct usbhs_mod *mod = usbhs_mod_get_current(priv);
	struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv);
	struct usbhs_fifo *d0fifo = usbhsf_get_d0fifo(priv);
	struct usbhs_fifo *d1fifo = usbhsf_get_d1fifo(priv);

	mod->irq_empty		= usbhsf_irq_empty;
	mod->irq_ready		= usbhsf_irq_ready;
	mod->irq_bempsts	= 0;
	mod->irq_brdysts	= 0;

	cfifo->pipe	= NULL;
	cfifo->tx_chan	= NULL;
	cfifo->rx_chan	= NULL;

	d0fifo->pipe	= NULL;
	d0fifo->tx_chan	= NULL;
	d0fifo->rx_chan	= NULL;

	d1fifo->pipe	= NULL;
	d1fifo->tx_chan	= NULL;
	d1fifo->rx_chan	= NULL;

	usbhsf_dma_init(priv, usbhsf_get_d0fifo(priv));
	usbhsf_dma_init(priv, usbhsf_get_d1fifo(priv));
}

void usbhs_fifo_quit(struct usbhs_priv *priv)
{
	struct usbhs_mod *mod = usbhs_mod_get_current(priv);

	mod->irq_empty		= NULL;
	mod->irq_ready		= NULL;
	mod->irq_bempsts	= 0;
	mod->irq_brdysts	= 0;

	usbhsf_dma_quit(priv, usbhsf_get_d0fifo(priv));
	usbhsf_dma_quit(priv, usbhsf_get_d1fifo(priv));
}

int usbhs_fifo_probe(struct usbhs_priv *priv)
{
	struct usbhs_fifo *fifo;

	/* CFIFO */
	fifo = usbhsf_get_cfifo(priv);
	fifo->name	= "CFIFO";
	fifo->port	= CFIFO;
	fifo->sel	= CFIFOSEL;
	fifo->ctr	= CFIFOCTR;

	/* D0FIFO */
	fifo = usbhsf_get_d0fifo(priv);
	fifo->name	= "D0FIFO";
	fifo->port	= D0FIFO;
	fifo->sel	= D0FIFOSEL;
	fifo->ctr	= D0FIFOCTR;
	fifo->tx_slave.slave_id	= usbhs_get_dparam(priv, d0_tx_id);
	fifo->rx_slave.slave_id	= usbhs_get_dparam(priv, d0_rx_id);

	/* D1FIFO */
	fifo = usbhsf_get_d1fifo(priv);
	fifo->name	= "D1FIFO";
	fifo->port	= D1FIFO;
	fifo->sel	= D1FIFOSEL;
	fifo->ctr	= D1FIFOCTR;
	fifo->tx_slave.slave_id	= usbhs_get_dparam(priv, d1_tx_id);
	fifo->rx_slave.slave_id	= usbhs_get_dparam(priv, d1_rx_id);

	return 0;
}

void usbhs_fifo_remove(struct usbhs_priv *priv)
{
}