/*
 * Samsung TV Mixer driver
 *
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *
 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 2 of the License,
 * or (at your option) any later version.
 */

#include "mixer.h"
#include "regs-mixer.h"
#include "regs-vp.h"

#include <linux/delay.h>

/* Register access subroutines */

static inline u32 vp_read(struct mxr_device *mdev, u32 reg_id)
{
	return readl(mdev->res.vp_regs + reg_id);
}

static inline void vp_write(struct mxr_device *mdev, u32 reg_id, u32 val)
{
	writel(val, mdev->res.vp_regs + reg_id);
}

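/*
 * Read-modify-write helpers: only the bits selected by @mask are taken
 * from @val, all other bits keep their current value in the register.
 */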
static inline void vp_write_mask(struct mxr_device *mdev, u32 reg_id,
	u32 val, u32 mask)
{
	u32 old = vp_read(mdev, reg_id);

	val = (val & mask) | (old & ~mask);
	writel(val, mdev->res.vp_regs + reg_id);
}

static inline u32 mxr_read(struct mxr_device *mdev, u32 reg_id)
{
	return readl(mdev->res.mxr_regs + reg_id);
}

static inline void mxr_write(struct mxr_device *mdev, u32 reg_id, u32 val)
{
	writel(val, mdev->res.mxr_regs + reg_id);
}

static inline void mxr_write_mask(struct mxr_device *mdev, u32 reg_id,
	u32 val, u32 mask)
{
	u32 old = mxr_read(mdev, reg_id);

	val = (val & mask) | (old & ~mask);
	writel(val, mdev->res.mxr_regs + reg_id);
}

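/*
 * Gate synchronized (shadow) register updates. While disabled, a vsync
 * cannot latch a partially written configuration into the working
 * registers; re-enabling makes the complete setup take effect atomically
 * at the next vsync.
 */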
void mxr_vsync_set_update(struct mxr_device *mdev, int en)
{
	/* block update on vsync */
	mxr_write_mask(mdev, MXR_STATUS, en ? MXR_STATUS_SYNC_ENABLE : 0,
		MXR_STATUS_SYNC_ENABLE);
	vp_write(mdev, VP_SHADOW_UPDATE, en ? VP_SHADOW_UPDATE_ENABLE : 0);
}

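/*
 * Soft-reset the Video Processor and poll until the hardware clears the
 * VP_SRESET_PROCESSING bit, waiting up to 100 x 10 ms.
 */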
static void __mxr_reg_vp_reset(struct mxr_device *mdev)
{
	int tries;

	vp_write(mdev, VP_SRESET, VP_SRESET_PROCESSING);
	for (tries = 100; tries; --tries) {
		/* waiting until VP_SRESET_PROCESSING is 0 */
		if (~vp_read(mdev, VP_SRESET) & VP_SRESET_PROCESSING)
			break;
		mdelay(10);
	}
	WARN(tries == 0, "failed to reset Video Processor\n");
}

static void mxr_reg_vp_default_filter(struct mxr_device *mdev);

void mxr_reg_reset(struct mxr_device *mdev)
{
	unsigned long flags;
	u32 val; /* value stored to register */

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* set output in RGB888 mode */
	mxr_write(mdev, MXR_CFG, MXR_CFG_OUT_RGB888);

	/* 16 beat burst in DMA */
	mxr_write_mask(mdev, MXR_STATUS, MXR_STATUS_16_BURST,
		MXR_STATUS_BURST_MASK);

	/* setting default layer priority: layer1 > video > layer0
	 * because the typical usage scenario would be
	 * layer0 - framebuffer
	 * video - video overlay
	 * layer1 - OSD
	 */
	val  = MXR_LAYER_CFG_GRP0_VAL(1);
	val |= MXR_LAYER_CFG_VP_VAL(2);
	val |= MXR_LAYER_CFG_GRP1_VAL(3);
	mxr_write(mdev, MXR_LAYER_CFG, val);

	/* use dark gray background color */
	mxr_write(mdev, MXR_BG_COLOR0, 0x808080);
	mxr_write(mdev, MXR_BG_COLOR1, 0x808080);
	mxr_write(mdev, MXR_BG_COLOR2, 0x808080);

	/* setting graphical layers */

	val  = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
	val |= MXR_GRP_CFG_BLEND_PRE_MUL; /* premul mode */
	val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */

	/* the same configuration for both layers */
	mxr_write(mdev, MXR_GRAPHIC_CFG(0), val);
	mxr_write(mdev, MXR_GRAPHIC_CFG(1), val);

	/* configuration of Video Processor registers */
	__mxr_reg_vp_reset(mdev);
	mxr_reg_vp_default_filter(mdev);

	/* enable all interrupts */
	mxr_write_mask(mdev, MXR_INT_EN, ~0, MXR_INT_EN_ALL);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

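/*
 * Program format and geometry of a graphic (RGB) layer: pixel format,
 * source span, width/height with scaling ratios, and offsets within the
 * source and destination images.
 */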
void mxr_reg_graph_format(struct mxr_device *mdev, int idx,
	const struct mxr_format *fmt, const struct mxr_geometry *geo)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* setup format */
	mxr_write_mask(mdev, MXR_GRAPHIC_CFG(idx),
		MXR_GRP_CFG_FORMAT_VAL(fmt->cookie), MXR_GRP_CFG_FORMAT_MASK);

	/* setup geometry */
	mxr_write(mdev, MXR_GRAPHIC_SPAN(idx), geo->src.full_width);
	val  = MXR_GRP_WH_WIDTH(geo->src.width);
	val |= MXR_GRP_WH_HEIGHT(geo->src.height);
	val |= MXR_GRP_WH_H_SCALE(geo->x_ratio);
	val |= MXR_GRP_WH_V_SCALE(geo->y_ratio);
	mxr_write(mdev, MXR_GRAPHIC_WH(idx), val);

	/* setup offsets in source image */
	val  = MXR_GRP_SXY_SX(geo->src.x_offset);
	val |= MXR_GRP_SXY_SY(geo->src.y_offset);
	mxr_write(mdev, MXR_GRAPHIC_SXY(idx), val);

	/* setup offsets in display image */
	val  = MXR_GRP_DXY_DX(geo->dst.x_offset);
	val |= MXR_GRP_DXY_DY(geo->dst.y_offset);
	mxr_write(mdev, MXR_GRAPHIC_DXY(idx), val);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

void mxr_reg_vp_format(struct mxr_device *mdev,
	const struct mxr_format *fmt, const struct mxr_geometry *geo)
{
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	vp_write_mask(mdev, VP_MODE, fmt->cookie, VP_MODE_FMT_MASK);

	/* setting size of input image */
	vp_write(mdev, VP_IMG_SIZE_Y, VP_IMG_HSIZE(geo->src.full_width) |
		VP_IMG_VSIZE(geo->src.full_height));
	/* chroma height has to be halved to avoid chroma distortions */
	vp_write(mdev, VP_IMG_SIZE_C, VP_IMG_HSIZE(geo->src.full_width) |
		VP_IMG_VSIZE(geo->src.full_height / 2));

	vp_write(mdev, VP_SRC_WIDTH, geo->src.width);
	vp_write(mdev, VP_SRC_HEIGHT, geo->src.height);
	vp_write(mdev, VP_SRC_H_POSITION,
		VP_SRC_H_POSITION_VAL(geo->src.x_offset));
	vp_write(mdev, VP_SRC_V_POSITION, geo->src.y_offset);

	vp_write(mdev, VP_DST_WIDTH, geo->dst.width);
	vp_write(mdev, VP_DST_H_POSITION, geo->dst.x_offset);
	if (geo->dst.field == V4L2_FIELD_INTERLACED) {
		vp_write(mdev, VP_DST_HEIGHT, geo->dst.height / 2);
		vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset / 2);
	} else {
		vp_write(mdev, VP_DST_HEIGHT, geo->dst.height);
		vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset);
	}

	vp_write(mdev, VP_H_RATIO, geo->x_ratio);
	vp_write(mdev, VP_V_RATIO, geo->y_ratio);

	vp_write(mdev, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

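/*
 * Set the DMA base address of a graphic layer; a zero address disables
 * the layer in MXR_CFG.
 */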
void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr)
{
	u32 val = addr ? ~0 : 0;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	if (idx == 0)
		mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
	else
		mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
	mxr_write(mdev, MXR_GRAPHIC_BASE(idx), addr);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

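/*
 * Set luma/chroma DMA addresses for the video layer: index 0 goes to the
 * top-field pointers, index 1 to the bottom-field pointers. A zero luma
 * address disables both the mixer's video layer and the Video Processor.
 */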
void mxr_reg_vp_buffer(struct mxr_device *mdev,
	dma_addr_t luma_addr[2], dma_addr_t chroma_addr[2])
{
	u32 val = luma_addr[0] ? ~0 : 0;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_VP_ENABLE);
	vp_write_mask(mdev, VP_ENABLE, val, VP_ENABLE_ON);
	/* TODO: fix tiled mode */
	vp_write(mdev, VP_TOP_Y_PTR, luma_addr[0]);
	vp_write(mdev, VP_TOP_C_PTR, chroma_addr[0]);
	vp_write(mdev, VP_BOT_Y_PTR, luma_addr[1]);
	vp_write(mdev, VP_BOT_C_PTR, chroma_addr[1]);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

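/*
 * Advance a layer's buffer queue on vsync: the buffer leaving the shadow
 * stage is returned as done, the pending update buffer moves to shadow,
 * and the next queued buffer (if any) becomes the new update buffer.
 */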
static void mxr_irq_layer_handle(struct mxr_layer *layer)
{
	struct list_head *head;
	struct mxr_buffer *done;

	/* skip non-existing layer */
	if (layer == NULL)
		return;
	head = &layer->enq_list;

	spin_lock(&layer->enq_slock);
	if (layer->state == MXR_LAYER_IDLE)
		goto done;

	done = layer->shadow_buf;
	layer->shadow_buf = layer->update_buf;

	if (list_empty(head)) {
		if (layer->state != MXR_LAYER_STREAMING)
			layer->update_buf = NULL;
	} else {
		struct mxr_buffer *next;
		next = list_first_entry(head, struct mxr_buffer, list);
		list_del(&next->list);
		layer->update_buf = next;
	}

	layer->ops.buffer_set(layer, layer->update_buf);

	if (done && done != layer->shadow_buf)
		vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);

done:
	spin_unlock(&layer->enq_slock);
}

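/*
 * Mixer interrupt handler: wake vsync waiters, acknowledge the interrupt,
 * and rotate the buffer queues of all layers on vsync.
 */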
irqreturn_t mxr_irq_handler(int irq, void *dev_data)
{
	struct mxr_device *mdev = dev_data;
	u32 i, val;

	spin_lock(&mdev->reg_slock);
	val = mxr_read(mdev, MXR_INT_STATUS);

	/* wake up process waiting for VSYNC */
	if (val & MXR_INT_STATUS_VSYNC) {
		set_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
		wake_up(&mdev->event_queue);
	}

	/* clear interrupts */
	if (~val & MXR_INT_EN_VSYNC) {
		/* the vsync interrupt uses different bits for status and clear */
		val &= ~MXR_INT_EN_VSYNC;
		val |= MXR_INT_CLEAR_VSYNC;
	}
	mxr_write(mdev, MXR_INT_STATUS, val);

	spin_unlock(&mdev->reg_slock);
	/* bail out early on a non-vsync event */
	if (~val & MXR_INT_CLEAR_VSYNC)
		return IRQ_HANDLED;
	for (i = 0; i < MXR_MAX_LAYERS; ++i)
		mxr_irq_layer_handle(mdev->layer[i]);
	return IRQ_HANDLED;
}

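/*
 * Select the mixer output destination: cookie 0 selects the SDO output,
 * any other value selects HDMI.
 */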
void mxr_reg_s_output(struct mxr_device *mdev, int cookie)
{
	u32 val;

	val = cookie == 0 ? MXR_CFG_DST_SDO : MXR_CFG_DST_HDMI;
	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_DST_MASK);
}

void mxr_reg_streamon(struct mxr_device *mdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	/* single write -> no need to block vsync update */

	/* start MIXER */
	mxr_write_mask(mdev, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);

	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

void mxr_reg_streamoff(struct mxr_device *mdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	/* single write -> no need to block vsync update */

	/* stop MIXER */
	mxr_write_mask(mdev, MXR_STATUS, 0, MXR_STATUS_REG_RUN);

	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

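/*
 * Wait up to one second for the next vsync interrupt; returns 0 on
 * success and -ETIME when no vsync arrived within the timeout.
 */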
int mxr_reg_wait4vsync(struct mxr_device *mdev)
{
	int ret;

	clear_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
	/* TODO: consider adding interruptible */
	ret = wait_event_timeout(mdev->event_queue,
		test_bit(MXR_EVENT_VSYNC, &mdev->event_flags),
		msecs_to_jiffies(1000));
	if (ret > 0)
		return 0;
	if (ret < 0)
		return ret;
	mxr_warn(mdev, "no vsync detected - timeout\n");
	return -ETIME;
}

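/*
 * Derive the mixer configuration from a media-bus format: output
 * colorspace, interlaced vs. progressive scan, and the SD/HD scan mode
 * matching the frame height.
 */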
void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
	struct v4l2_mbus_framefmt *fmt)
{
	u32 val = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* selecting colorspace accepted by output */
	if (fmt->colorspace == V4L2_COLORSPACE_JPEG)
		val |= MXR_CFG_OUT_YUV444;
	else
		val |= MXR_CFG_OUT_RGB888;

	/* choosing between interlaced and progressive mode */
	if (fmt->field == V4L2_FIELD_INTERLACED)
		val |= MXR_CFG_SCAN_INTERLACE;
	else
		val |= MXR_CFG_SCAN_PROGRASSIVE;

	/* choosing the proper HD or SD mode */
	if (fmt->height == 480)
		val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
	else if (fmt->height == 576)
		val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
	else if (fmt->height == 720)
		val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
	else if (fmt->height == 1080)
		val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
	else
		WARN(1, "unrecognized mbus height %u!\n", fmt->height);

	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_SCAN_MASK |
		MXR_CFG_OUT_MASK);

	val = (fmt->field == V4L2_FIELD_INTERLACED) ? ~0 : 0;
	vp_write_mask(mdev, VP_MODE, val,
		VP_MODE_LINE_SKIP | VP_MODE_FIELD_ID_AUTO_TOGGLING);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

void mxr_reg_graph_layer_stream(struct mxr_device *mdev, int idx, int en)
{
	/* no extra actions are needed */
}

void mxr_reg_vp_layer_stream(struct mxr_device *mdev, int en)
{
	/* no extra actions are needed */
}

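/*
 * Default polyphase scaler coefficients for the Video Processor: an 8-tap
 * horizontal luma filter, a 4-tap vertical luma filter and a 4-tap
 * horizontal chroma filter. Negative values rely on two's-complement
 * storage in u8.
 */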
static const u8 filter_y_horiz_tap8[] = {
	0,	-1,	-1,	-1,	-1,	-1,	-1,	-1,
	-1,	-1,	-1,	-1,	-1,	0,	0,	0,
	0,	2,	4,	5,	6,	6,	6,	6,
	6,	5,	5,	4,	3,	2,	1,	1,
	0,	-6,	-12,	-16,	-18,	-20,	-21,	-20,
	-20,	-18,	-16,	-13,	-10,	-8,	-5,	-2,
	127,	126,	125,	121,	114,	107,	99,	89,
	79,	68,	57,	46,	35,	25,	16,	8,
};

static const u8 filter_y_vert_tap4[] = {
	0,	-3,	-6,	-8,	-8,	-8,	-8,	-7,
	-6,	-5,	-4,	-3,	-2,	-1,	-1,	0,
	127,	126,	124,	118,	111,	102,	92,	81,
	70,	59,	48,	37,	27,	19,	11,	5,
	0,	5,	11,	19,	27,	37,	48,	59,
	70,	81,	92,	102,	111,	118,	124,	126,
	0,	0,	-1,	-1,	-2,	-3,	-4,	-5,
	-6,	-7,	-8,	-8,	-8,	-8,	-6,	-3,
};

static const u8 filter_cr_horiz_tap4[] = {
	0,	-3,	-6,	-8,	-8,	-8,	-8,	-7,
	-6,	-5,	-4,	-3,	-2,	-1,	-1,	0,
	127,	126,	124,	118,	111,	102,	92,	81,
	70,	59,	48,	37,	27,	19,	11,	5,
};

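/*
 * Stream a coefficient table into consecutive filter registers, packing
 * four signed bytes per 32-bit write with data[0] in the most significant
 * byte.
 */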
static inline void mxr_reg_vp_filter_set(struct mxr_device *mdev,
	int reg_id, const u8 *data, unsigned int size)
{
	/* ensure that the size is a multiple of 4 bytes */
	BUG_ON(size & 3);
	for (; size; size -= 4, reg_id += 4, data += 4) {
		u32 val = (data[0] << 24) | (data[1] << 16) |
			(data[2] << 8) | data[3];
		vp_write(mdev, reg_id, val);
	}
}

static void mxr_reg_vp_default_filter(struct mxr_device *mdev)
{
	mxr_reg_vp_filter_set(mdev, VP_POLY8_Y0_LL,
		filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
	mxr_reg_vp_filter_set(mdev, VP_POLY4_Y0_LL,
		filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
	mxr_reg_vp_filter_set(mdev, VP_POLY4_C0_LL,
		filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
}

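/* debug helpers dumping the current mixer and VP register state */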
static void mxr_reg_mxr_dump(struct mxr_device *mdev)
{
#define DUMPREG(reg_id) \
do { \
	mxr_dbg(mdev, #reg_id " = %08x\n", \
		(u32)readl(mdev->res.mxr_regs + reg_id)); \
} while (0)

	DUMPREG(MXR_STATUS);
	DUMPREG(MXR_CFG);
	DUMPREG(MXR_INT_EN);
	DUMPREG(MXR_INT_STATUS);

	DUMPREG(MXR_LAYER_CFG);
	DUMPREG(MXR_VIDEO_CFG);

	DUMPREG(MXR_GRAPHIC0_CFG);
	DUMPREG(MXR_GRAPHIC0_BASE);
	DUMPREG(MXR_GRAPHIC0_SPAN);
	DUMPREG(MXR_GRAPHIC0_WH);
	DUMPREG(MXR_GRAPHIC0_SXY);
	DUMPREG(MXR_GRAPHIC0_DXY);

	DUMPREG(MXR_GRAPHIC1_CFG);
	DUMPREG(MXR_GRAPHIC1_BASE);
	DUMPREG(MXR_GRAPHIC1_SPAN);
	DUMPREG(MXR_GRAPHIC1_WH);
	DUMPREG(MXR_GRAPHIC1_SXY);
	DUMPREG(MXR_GRAPHIC1_DXY);
#undef DUMPREG
}

static void mxr_reg_vp_dump(struct mxr_device *mdev)
{
#define DUMPREG(reg_id) \
do { \
	mxr_dbg(mdev, #reg_id " = %08x\n", \
		(u32)readl(mdev->res.vp_regs + reg_id)); \
} while (0)

	DUMPREG(VP_ENABLE);
	DUMPREG(VP_SRESET);
	DUMPREG(VP_SHADOW_UPDATE);
	DUMPREG(VP_FIELD_ID);
	DUMPREG(VP_MODE);
	DUMPREG(VP_IMG_SIZE_Y);
	DUMPREG(VP_IMG_SIZE_C);
	DUMPREG(VP_PER_RATE_CTRL);
	DUMPREG(VP_TOP_Y_PTR);
	DUMPREG(VP_BOT_Y_PTR);
	DUMPREG(VP_TOP_C_PTR);
	DUMPREG(VP_BOT_C_PTR);
	DUMPREG(VP_ENDIAN_MODE);
	DUMPREG(VP_SRC_H_POSITION);
	DUMPREG(VP_SRC_V_POSITION);
	DUMPREG(VP_SRC_WIDTH);
	DUMPREG(VP_SRC_HEIGHT);
	DUMPREG(VP_DST_H_POSITION);
	DUMPREG(VP_DST_V_POSITION);
	DUMPREG(VP_DST_WIDTH);
	DUMPREG(VP_DST_HEIGHT);
	DUMPREG(VP_H_RATIO);
	DUMPREG(VP_V_RATIO);

#undef DUMPREG
}

void mxr_reg_dump(struct mxr_device *mdev)
{
	mxr_reg_mxr_dump(mdev);
	mxr_reg_vp_dump(mdev);
}