// SPDX-License-Identifier: GPL-2.0-only
/*
 * cobalt interrupt handling
 *
 * Copyright 2012-2015 Cisco Systems, Inc. and/or its affiliates.
 * All rights reserved.
 */

#include <media/i2c/adv7604.h>

#include "cobalt-driver.h"
#include "cobalt-irq.h"
#include "cobalt-omnitek.h"

static void cobalt_dma_stream_queue_handler(struct cobalt_stream *s)
{
	struct cobalt *cobalt = s->cobalt;
	int rx = s->video_channel;
	struct m00473_freewheel_regmap __iomem *fw =
		COBALT_CVI_FREEWHEEL(s->cobalt, rx);
	struct m00233_video_measure_regmap __iomem *vmr =
		COBALT_CVI_VMR(s->cobalt, rx);
	struct m00389_cvi_regmap __iomem *cvi =
		COBALT_CVI(s->cobalt, rx);
	struct m00479_clk_loss_detector_regmap __iomem *clkloss =
		COBALT_CVI_CLK_LOSS(s->cobalt, rx);
	struct cobalt_buffer *cb;
	bool skip = false;

	spin_lock(&s->irqlock);

	if (list_empty(&s->bufs)) {
		pr_err("no buffers!\n");
		spin_unlock(&s->irqlock);
		return;
	}

	/* Give the fresh filled up buffer to the user.
	 * Note that the interrupt is only sent if the DMA can continue
	 * with a new buffer, so it is always safe to return this buffer
	 * to userspace. */
	cb = list_first_entry(&s->bufs, struct cobalt_buffer, list);
	list_del(&cb->list);
	spin_unlock(&s->irqlock);

	if (s->is_audio || s->is_output)
		goto done;

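	/*
	 * While the incoming signal is marked unstable, step through the
	 * re-lock sequence: wait for the video measurer to finish its init,
	 * for a valid clock, for the measured active area to match the
	 * configured timings and for the CVI to lock. Then take the
	 * freewheel out of forced mode and declare the signal stable again,
	 * skipping the first two frames.
	 */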
	if (s->unstable_frame) {
		uint32_t stat = ioread32(&vmr->irq_status);

		iowrite32(stat, &vmr->irq_status);
		if (!(ioread32(&vmr->status) &
		      M00233_STATUS_BITMAP_INIT_DONE_MSK)) {
			cobalt_dbg(1, "!init_done\n");
			if (s->enable_freewheel)
				goto restart_fw;
			goto done;
		}

		if (ioread32(&clkloss->status) &
		    M00479_STATUS_BITMAP_CLOCK_MISSING_MSK) {
			iowrite32(0, &clkloss->ctrl);
			iowrite32(M00479_CTRL_BITMAP_ENABLE_MSK, &clkloss->ctrl);
			cobalt_dbg(1, "no clock\n");
			if (s->enable_freewheel)
				goto restart_fw;
			goto done;
		}
		if ((stat & (M00233_IRQ_STATUS_BITMAP_VACTIVE_AREA_MSK |
			     M00233_IRQ_STATUS_BITMAP_HACTIVE_AREA_MSK)) ||
		    ioread32(&vmr->vactive_area) != s->timings.bt.height ||
		    ioread32(&vmr->hactive_area) != s->timings.bt.width) {
			cobalt_dbg(1, "unstable\n");
			if (s->enable_freewheel)
				goto restart_fw;
			goto done;
		}
		if (!s->enable_cvi) {
			s->enable_cvi = true;
			iowrite32(M00389_CONTROL_BITMAP_ENABLE_MSK, &cvi->control);
			goto done;
		}
		if (!(ioread32(&cvi->status) & M00389_STATUS_BITMAP_LOCK_MSK)) {
			cobalt_dbg(1, "cvi no lock\n");
			if (s->enable_freewheel)
				goto restart_fw;
			goto done;
		}
		if (!s->enable_freewheel) {
			cobalt_dbg(1, "stable\n");
			s->enable_freewheel = true;
			iowrite32(0, &fw->ctrl);
			goto done;
		}
		cobalt_dbg(1, "enabled fw\n");
		iowrite32(M00233_CONTROL_BITMAP_ENABLE_MEASURE_MSK |
			  M00233_CONTROL_BITMAP_ENABLE_INTERRUPT_MSK,
			  &vmr->control);
		iowrite32(M00473_CTRL_BITMAP_ENABLE_MSK, &fw->ctrl);
		s->enable_freewheel = false;
		s->unstable_frame = false;
		s->skip_first_frames = 2;
		skip = true;
		goto done;
	}
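	/*
	 * The signal was considered stable: if the hardware has dropped into
	 * freewheel mode the input lock was lost, so force freewheeling,
	 * disable the CVI and restart the re-lock sequence above.
	 */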
	if (ioread32(&fw->status) & M00473_STATUS_BITMAP_FREEWHEEL_MODE_MSK) {
restart_fw:
		cobalt_dbg(1, "lost lock\n");
		iowrite32(M00233_CONTROL_BITMAP_ENABLE_MEASURE_MSK,
			  &vmr->control);
		iowrite32(M00473_CTRL_BITMAP_ENABLE_MSK |
			  M00473_CTRL_BITMAP_FORCE_FREEWHEEL_MODE_MSK,
			  &fw->ctrl);
		iowrite32(0, &cvi->control);
		s->unstable_frame = true;
		s->enable_freewheel = false;
		s->enable_cvi = false;
	}
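	/*
	 * Hand the filled buffer back to vb2. Frames captured while the
	 * input is unstable or during the initial skip window are returned
	 * with an error status so userspace can discard them.
	 */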
done:
	if (s->skip_first_frames) {
		skip = true;
		s->skip_first_frames--;
	}
	cb->vb.vb2_buf.timestamp = ktime_get_ns();
	/* TODO: the sequence number should be read from the FPGA so we
	   also know about dropped frames. */
	cb->vb.sequence = s->sequence++;
	vb2_buffer_done(&cb->vb.vb2_buf,
			(skip || s->unstable_frame) ?
			VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
}

irqreturn_t cobalt_irq_handler(int irq, void *dev_id)
{
	struct cobalt *cobalt = (struct cobalt *)dev_id;
	u32 dma_interrupt =
		cobalt_read_bar0(cobalt, DMA_INTERRUPT_STATUS_REG) & 0xffff;
	u32 mask = cobalt_read_bar1(cobalt, COBALT_SYS_STAT_MASK);
	u32 edge = cobalt_read_bar1(cobalt, COBALT_SYS_STAT_EDGE);
	int i;

	/* Clear DMA interrupt */
	cobalt_write_bar0(cobalt, DMA_INTERRUPT_STATUS_REG, dma_interrupt);
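	/*
	 * Mask off the status bits that just fired and clear the edge
	 * detector; each bit is re-enabled once its source has been serviced
	 * (the DMA FIFO bits below, the ADV bits from the work handler).
	 */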
	cobalt_write_bar1(cobalt, COBALT_SYS_STAT_MASK, mask & ~edge);
	cobalt_write_bar1(cobalt, COBALT_SYS_STAT_EDGE, edge);

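	/*
	 * Per stream: complete the finished DMA transfer, latch any ADV
	 * interrupt for the work handler and count full RX FIFOs.
	 */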
	for (i = 0; i < COBALT_NUM_STREAMS; i++) {
		struct cobalt_stream *s = &cobalt->streams[i];
		unsigned dma_fifo_mask = s->dma_fifo_mask;

		if (dma_interrupt & (1 << s->dma_channel)) {
			cobalt->irq_dma[i]++;
			/* Give fresh buffer to user and chain newly
			 * queued buffers */
			cobalt_dma_stream_queue_handler(s);
			if (!s->is_audio) {
				edge &= ~dma_fifo_mask;
				cobalt_write_bar1(cobalt, COBALT_SYS_STAT_MASK,
						  mask & ~edge);
			}
		}
		if (s->is_audio)
			continue;
		if (edge & s->adv_irq_mask)
			set_bit(COBALT_STREAM_FL_ADV_IRQ, &s->flags);
		if ((edge & mask & dma_fifo_mask) && vb2_is_streaming(&s->q)) {
			cobalt_info("full rx FIFO %d\n", i);
			cobalt->irq_full_fifo++;
		}
	}

	queue_work(cobalt->irq_work_queues, &cobalt->irq_work_queue);

	if (edge & mask & (COBALT_SYSSTAT_VI0_INT1_MSK |
			   COBALT_SYSSTAT_VI1_INT1_MSK |
			   COBALT_SYSSTAT_VI2_INT1_MSK |
			   COBALT_SYSSTAT_VI3_INT1_MSK |
			   COBALT_SYSSTAT_VIHSMA_INT1_MSK |
			   COBALT_SYSSTAT_VOHSMA_INT1_MSK))
		cobalt->irq_adv1++;
	if (edge & mask & (COBALT_SYSSTAT_VI0_INT2_MSK |
			   COBALT_SYSSTAT_VI1_INT2_MSK |
			   COBALT_SYSSTAT_VI2_INT2_MSK |
			   COBALT_SYSSTAT_VI3_INT2_MSK |
			   COBALT_SYSSTAT_VIHSMA_INT2_MSK))
		cobalt->irq_adv2++;
	if (edge & mask & COBALT_SYSSTAT_VOHSMA_INT1_MSK)
		cobalt->irq_advout++;
	if (dma_interrupt)
		cobalt->irq_dma_tot++;
	if (!(edge & mask) && !dma_interrupt)
		cobalt->irq_none++;
	dma_interrupt = cobalt_read_bar0(cobalt, DMA_INTERRUPT_STATUS_REG);

	return IRQ_HANDLED;
}

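/*
 * The ADV subdev interrupt routines talk to the receivers over I2C and may
 * sleep, so they cannot run in hard interrupt context. The interrupt handler
 * latches COBALT_STREAM_FL_ADV_IRQ and schedules this work handler, which
 * services the subdev and then re-enables its interrupt in the system status
 * mask.
 */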
void cobalt_irq_work_handler(struct work_struct *work)
{
	struct cobalt *cobalt =
		container_of(work, struct cobalt, irq_work_queue);
	int i;

	for (i = 0; i < COBALT_NUM_NODES; i++) {
		struct cobalt_stream *s = &cobalt->streams[i];

		if (test_and_clear_bit(COBALT_STREAM_FL_ADV_IRQ, &s->flags)) {
			u32 mask;

			v4l2_subdev_call(cobalt->streams[i].sd, core,
					 interrupt_service_routine, 0, NULL);
			mask = cobalt_read_bar1(cobalt, COBALT_SYS_STAT_MASK);
			cobalt_write_bar1(cobalt, COBALT_SYS_STAT_MASK,
					  mask | s->adv_irq_mask);
		}
	}
}

void cobalt_irq_log_status(struct cobalt *cobalt)
{
	u32 mask;
	int i;

	cobalt_info("irq: adv1=%u adv2=%u advout=%u none=%u full=%u\n",
		    cobalt->irq_adv1, cobalt->irq_adv2, cobalt->irq_advout,
		    cobalt->irq_none, cobalt->irq_full_fifo);
	cobalt_info("irq: dma_tot=%u (", cobalt->irq_dma_tot);
	for (i = 0; i < COBALT_NUM_STREAMS; i++)
		pr_cont("%s%u", i ? "/" : "", cobalt->irq_dma[i]);
	pr_cont(")\n");
	cobalt->irq_dma_tot = cobalt->irq_adv1 = cobalt->irq_adv2 = 0;
	cobalt->irq_advout = cobalt->irq_none = cobalt->irq_full_fifo = 0;
	memset(cobalt->irq_dma, 0, sizeof(cobalt->irq_dma));

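	/*
	 * Re-enable all "lost data" (FIFO overflow) interrupts so that any
	 * future overflow is reported again.
	 */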
	mask = cobalt_read_bar1(cobalt, COBALT_SYS_STAT_MASK);
	cobalt_write_bar1(cobalt, COBALT_SYS_STAT_MASK,
			  mask |
			  COBALT_SYSSTAT_VI0_LOST_DATA_MSK |
			  COBALT_SYSSTAT_VI1_LOST_DATA_MSK |
			  COBALT_SYSSTAT_VI2_LOST_DATA_MSK |
			  COBALT_SYSSTAT_VI3_LOST_DATA_MSK |
			  COBALT_SYSSTAT_VIHSMA_LOST_DATA_MSK |
			  COBALT_SYSSTAT_VOHSMA_LOST_DATA_MSK |
			  COBALT_SYSSTAT_AUD_IN_LOST_DATA_MSK |
			  COBALT_SYSSTAT_AUD_OUT_LOST_DATA_MSK);
}