1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Xilinx XADC driver
4 *
5 * Copyright 2013-2014 Analog Devices Inc.
6 * Author: Lars-Peter Clausen <lars@metafoo.de>
7 *
8 * Documentation for the parts can be found at:
9 * - XADC hardmacro: Xilinx UG480
10 * - ZYNQ XADC interface: Xilinx UG585
11 * - AXI XADC interface: Xilinx PG019
12 */
13
14 #include <linux/clk.h>
15 #include <linux/device.h>
16 #include <linux/err.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/of.h>
22 #include <linux/overflow.h>
23 #include <linux/platform_device.h>
24 #include <linux/slab.h>
25 #include <linux/sysfs.h>
26
27 #include <linux/iio/buffer.h>
28 #include <linux/iio/events.h>
29 #include <linux/iio/iio.h>
30 #include <linux/iio/sysfs.h>
31 #include <linux/iio/trigger.h>
32 #include <linux/iio/trigger_consumer.h>
33 #include <linux/iio/triggered_buffer.h>
34
35 #include "xilinx-xadc.h"
36
37 static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;
38
39 /* ZYNQ register definitions */
40 #define XADC_ZYNQ_REG_CFG 0x00
41 #define XADC_ZYNQ_REG_INTSTS 0x04
42 #define XADC_ZYNQ_REG_INTMSK 0x08
43 #define XADC_ZYNQ_REG_STATUS 0x0c
44 #define XADC_ZYNQ_REG_CFIFO 0x10
45 #define XADC_ZYNQ_REG_DFIFO 0x14
46 #define XADC_ZYNQ_REG_CTL 0x18
47
48 #define XADC_ZYNQ_CFG_ENABLE BIT(31)
49 #define XADC_ZYNQ_CFG_CFIFOTH_MASK (0xf << 20)
50 #define XADC_ZYNQ_CFG_CFIFOTH_OFFSET 20
51 #define XADC_ZYNQ_CFG_DFIFOTH_MASK (0xf << 16)
52 #define XADC_ZYNQ_CFG_DFIFOTH_OFFSET 16
53 #define XADC_ZYNQ_CFG_WEDGE BIT(13)
54 #define XADC_ZYNQ_CFG_REDGE BIT(12)
55 #define XADC_ZYNQ_CFG_TCKRATE_MASK (0x3 << 8)
56 #define XADC_ZYNQ_CFG_TCKRATE_DIV2 (0x0 << 8)
57 #define XADC_ZYNQ_CFG_TCKRATE_DIV4 (0x1 << 8)
58 #define XADC_ZYNQ_CFG_TCKRATE_DIV8 (0x2 << 8)
59 #define XADC_ZYNQ_CFG_TCKRATE_DIV16 (0x3 << 8)
60 #define XADC_ZYNQ_CFG_IGAP_MASK 0x1f
61 #define XADC_ZYNQ_CFG_IGAP(x) (x)
62
63 #define XADC_ZYNQ_INT_CFIFO_LTH BIT(9)
64 #define XADC_ZYNQ_INT_DFIFO_GTH BIT(8)
65 #define XADC_ZYNQ_INT_ALARM_MASK 0xff
66 #define XADC_ZYNQ_INT_ALARM_OFFSET 0
67
68 #define XADC_ZYNQ_STATUS_CFIFO_LVL_MASK (0xf << 16)
69 #define XADC_ZYNQ_STATUS_CFIFO_LVL_OFFSET 16
70 #define XADC_ZYNQ_STATUS_DFIFO_LVL_MASK (0xf << 12)
71 #define XADC_ZYNQ_STATUS_DFIFO_LVL_OFFSET 12
72 #define XADC_ZYNQ_STATUS_CFIFOF BIT(11)
73 #define XADC_ZYNQ_STATUS_CFIFOE BIT(10)
74 #define XADC_ZYNQ_STATUS_DFIFOF BIT(9)
75 #define XADC_ZYNQ_STATUS_DFIFOE BIT(8)
76 #define XADC_ZYNQ_STATUS_OT BIT(7)
77 #define XADC_ZYNQ_STATUS_ALM(x) BIT(x)
78
79 #define XADC_ZYNQ_CTL_RESET BIT(4)
80
81 #define XADC_ZYNQ_CMD_NOP 0x00
82 #define XADC_ZYNQ_CMD_READ 0x01
83 #define XADC_ZYNQ_CMD_WRITE 0x02
84
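/*
 * A command FIFO word, as assembled by XADC_ZYNQ_CMD() below, carries the
 * command code starting at bit 26, the DRP register address in bits 25:16
 * and the 16-bit write data in the lower half.
 */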
85 #define XADC_ZYNQ_CMD(cmd, addr, data) (((cmd) << 26) | ((addr) << 16) | (data))
86
87 /* AXI register definitions */
88 #define XADC_AXI_REG_RESET 0x00
89 #define XADC_AXI_REG_STATUS 0x04
90 #define XADC_AXI_REG_ALARM_STATUS 0x08
91 #define XADC_AXI_REG_CONVST 0x0c
92 #define XADC_AXI_REG_XADC_RESET 0x10
93 #define XADC_AXI_REG_GIER 0x5c
94 #define XADC_AXI_REG_IPISR 0x60
95 #define XADC_AXI_REG_IPIER 0x68
96
97 /* 7 Series */
98 #define XADC_7S_AXI_ADC_REG_OFFSET 0x200
99
100 /* UltraScale */
101 #define XADC_US_AXI_ADC_REG_OFFSET 0x400
102
103 #define XADC_AXI_RESET_MAGIC 0xa
104 #define XADC_AXI_GIER_ENABLE BIT(31)
105
106 #define XADC_AXI_INT_EOS BIT(4)
107 #define XADC_AXI_INT_ALARM_MASK 0x3c0f
108
109 #define XADC_FLAGS_BUFFERED BIT(0)
110 #define XADC_FLAGS_IRQ_OPTIONAL BIT(1)
111
112 /*
113 * The XADC hardware supports a samplerate of up to 1 MSPS. Unfortunately it
114 * does not have a hardware FIFO, which means an interrupt is generated for
115 * each conversion sequence. At a samplerate of 1 MSPS the CPU of the ZYNQ7000
116 * is so overloaded by the interrupts that it soft-locks up. For this reason
117 * the driver limits the maximum samplerate to 150 kSPS. At this rate the CPU
118 * is fairly busy, but still responsive.
119 */
120 #define XADC_MAX_SAMPLERATE 150000
121
122 static void xadc_write_reg(struct xadc *xadc, unsigned int reg,
123 uint32_t val)
124 {
125 writel(val, xadc->base + reg);
126 }
127
128 static void xadc_read_reg(struct xadc *xadc, unsigned int reg,
129 uint32_t *val)
130 {
131 *val = readl(xadc->base + reg);
132 }
133
134 /*
135 * The ZYNQ interface uses two asynchronous FIFOs for communication with the
136 * XADC. Reads and writes to the XADC register are performed by submitting a
137 * request to the command FIFO (CFIFO), once the request has been completed the
138 * result can be read from the data FIFO (DFIFO). The method currently used in
139 * this driver is to submit the request for a read/write operation, then go to
140 * sleep and wait for an interrupt that signals that a response is available in
141 * the data FIFO.
142 */
143
144 static void xadc_zynq_write_fifo(struct xadc *xadc, uint32_t *cmd,
145 unsigned int n)
146 {
147 unsigned int i;
148
149 for (i = 0; i < n; i++)
150 xadc_write_reg(xadc, XADC_ZYNQ_REG_CFIFO, cmd[i]);
151 }
152
153 static void xadc_zynq_drain_fifo(struct xadc *xadc)
154 {
155 uint32_t status, tmp;
156
157 xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &status);
158
159 while (!(status & XADC_ZYNQ_STATUS_DFIFOE)) {
160 xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &tmp);
161 xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &status);
162 }
163 }
164
165 static void xadc_zynq_update_intmsk(struct xadc *xadc, unsigned int mask,
166 unsigned int val)
167 {
168 xadc->zynq_intmask &= ~mask;
169 xadc->zynq_intmask |= val;
170
171 xadc_write_reg(xadc, XADC_ZYNQ_REG_INTMSK,
172 xadc->zynq_intmask | xadc->zynq_masked_alarm);
173 }
174
175 static int xadc_zynq_write_adc_reg(struct xadc *xadc, unsigned int reg,
176 uint16_t val)
177 {
178 uint32_t cmd[1];
179 uint32_t tmp;
180 int ret;
181
182 spin_lock_irq(&xadc->lock);
183 xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
184 XADC_ZYNQ_INT_DFIFO_GTH);
185
186 reinit_completion(&xadc->completion);
187
188 cmd[0] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_WRITE, reg, val);
189 xadc_zynq_write_fifo(xadc, cmd, ARRAY_SIZE(cmd));
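/*
 * Set the DFIFO level threshold to 0 so the DFIFO_GTH interrupt fires as
 * soon as the single response word for the write command shows up.
 */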
190 xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &tmp);
191 tmp &= ~XADC_ZYNQ_CFG_DFIFOTH_MASK;
192 tmp |= 0 << XADC_ZYNQ_CFG_DFIFOTH_OFFSET;
193 xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, tmp);
194
195 xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH, 0);
196 spin_unlock_irq(&xadc->lock);
197
198 ret = wait_for_completion_interruptible_timeout(&xadc->completion, HZ);
199 if (ret == 0)
200 ret = -EIO;
201 else
202 ret = 0;
203
204 xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &tmp);
205
206 return ret;
207 }
208
209 static int xadc_zynq_read_adc_reg(struct xadc *xadc, unsigned int reg,
210 uint16_t *val)
211 {
212 uint32_t cmd[2];
213 uint32_t resp, tmp;
214 int ret;
215
216 cmd[0] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_READ, reg, 0);
217 cmd[1] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_NOP, 0, 0);
218
219 spin_lock_irq(&xadc->lock);
220 xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
221 XADC_ZYNQ_INT_DFIFO_GTH);
222 xadc_zynq_drain_fifo(xadc);
223 reinit_completion(&xadc->completion);
224
225 xadc_zynq_write_fifo(xadc, cmd, ARRAY_SIZE(cmd));
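/*
 * Two command words were queued (the read plus a NOP that flushes out the
 * read result), so set the DFIFO level threshold to 1 to only get the
 * interrupt once both response words have arrived.
 */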
226 xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &tmp);
227 tmp &= ~XADC_ZYNQ_CFG_DFIFOTH_MASK;
228 tmp |= 1 << XADC_ZYNQ_CFG_DFIFOTH_OFFSET;
229 xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, tmp);
230
231 xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH, 0);
232 spin_unlock_irq(&xadc->lock);
233 ret = wait_for_completion_interruptible_timeout(&xadc->completion, HZ);
234 if (ret == 0)
235 ret = -EIO;
236 if (ret < 0)
237 return ret;
238
239 xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &resp);
240 xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &resp);
241
242 *val = resp & 0xffff;
243
244 return 0;
245 }
246
247 static unsigned int xadc_zynq_transform_alarm(unsigned int alarm)
248 {
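/*
 * Remap the ZYNQ interrupt status alarm bits into the order expected by
 * xadc_handle_events(): the over-temperature bit moves from bit 7 down to
 * bit 3, bits 6:3 move up to bits 7:4 and bits 2:0 stay where they are.
 */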
249 return ((alarm & 0x80) >> 4) |
250 ((alarm & 0x78) << 1) |
251 (alarm & 0x07);
252 }
253
254 /*
255 * The ZYNQ threshold interrupts are level sensitive. Since we can't make the
256 * threshold condition go away from within the interrupt handler, as soon as a
257 * threshold condition is present we would enter the interrupt handler again
258 * and again. To work around this we mask all active threshold interrupts in
259 * the interrupt handler and schedule a delayed work. In this work we poll the
260 * interrupt status and only unmask an interrupt again once it is inactive.
261 */
262 static void xadc_zynq_unmask_worker(struct work_struct *work)
263 {
264 struct xadc *xadc = container_of(work, struct xadc, zynq_unmask_work.work);
265 unsigned int misc_sts, unmask;
266
267 xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &misc_sts);
268
269 misc_sts &= XADC_ZYNQ_INT_ALARM_MASK;
270
271 spin_lock_irq(&xadc->lock);
272
273 /* Clear those bits which are not active anymore */
274 unmask = (xadc->zynq_masked_alarm ^ misc_sts) & xadc->zynq_masked_alarm;
275 xadc->zynq_masked_alarm &= misc_sts;
276
277 /* Also clear those which are masked out anyway */
278 xadc->zynq_masked_alarm &= ~xadc->zynq_intmask;
279
280 /* Clear the interrupts before we unmask them */
281 xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, unmask);
282
283 xadc_zynq_update_intmsk(xadc, 0, 0);
284
285 spin_unlock_irq(&xadc->lock);
286
287 /* If some alarm is still pending, re-schedule the delayed work */
288 if (xadc->zynq_masked_alarm) {
289 schedule_delayed_work(&xadc->zynq_unmask_work,
290 msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
291 }
292
293 }
294
295 static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
296 {
297 struct iio_dev *indio_dev = devid;
298 struct xadc *xadc = iio_priv(indio_dev);
299 uint32_t status;
300
301 xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);
302
303 status &= ~(xadc->zynq_intmask | xadc->zynq_masked_alarm);
304
305 if (!status)
306 return IRQ_NONE;
307
308 spin_lock(&xadc->lock);
309
310 xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, status);
311
312 if (status & XADC_ZYNQ_INT_DFIFO_GTH) {
313 xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
314 XADC_ZYNQ_INT_DFIFO_GTH);
315 complete(&xadc->completion);
316 }
317
318 status &= XADC_ZYNQ_INT_ALARM_MASK;
319 if (status) {
320 xadc->zynq_masked_alarm |= status;
321 /*
322 * Mask the current event interrupt; it is unmasked again
323 * once the interrupt condition is no longer active.
324 */
325 xadc_zynq_update_intmsk(xadc, 0, 0);
326
327 xadc_handle_events(indio_dev,
328 xadc_zynq_transform_alarm(status));
329
330 /* The required interrupts are unmasked again from the delayed work. */
331 schedule_delayed_work(&xadc->zynq_unmask_work,
332 msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
333 }
334 spin_unlock(&xadc->lock);
335
336 return IRQ_HANDLED;
337 }
338
339 #define XADC_ZYNQ_TCK_RATE_MAX 50000000
340 #define XADC_ZYNQ_IGAP_DEFAULT 20
341 #define XADC_ZYNQ_PCAP_RATE_MAX 200000000
342
343 static int xadc_zynq_setup(struct platform_device *pdev,
344 struct iio_dev *indio_dev, int irq)
345 {
346 struct xadc *xadc = iio_priv(indio_dev);
347 unsigned long pcap_rate;
348 unsigned int tck_div;
349 unsigned int div;
350 unsigned int igap;
351 unsigned int tck_rate;
352 int ret;
353
354 /* TODO: Figure out how to make igap and tck_rate configurable */
355 igap = XADC_ZYNQ_IGAP_DEFAULT;
356 tck_rate = XADC_ZYNQ_TCK_RATE_MAX;
357
358 xadc->zynq_intmask = ~0;
359
360 pcap_rate = clk_get_rate(xadc->clk);
361 if (!pcap_rate)
362 return -EINVAL;
363
364 if (pcap_rate > XADC_ZYNQ_PCAP_RATE_MAX) {
365 ret = clk_set_rate(xadc->clk,
366 (unsigned long)XADC_ZYNQ_PCAP_RATE_MAX);
367 if (ret)
368 return ret;
369 }
370
371 if (tck_rate > pcap_rate / 2) {
372 div = 2;
373 } else {
374 div = pcap_rate / tck_rate;
375 if (pcap_rate / div > XADC_ZYNQ_TCK_RATE_MAX)
376 div++;
377 }
378
379 if (div <= 3)
380 tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV2;
381 else if (div <= 7)
382 tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV4;
383 else if (div <= 15)
384 tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV8;
385 else
386 tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV16;
387
388 xadc_write_reg(xadc, XADC_ZYNQ_REG_CTL, XADC_ZYNQ_CTL_RESET);
389 xadc_write_reg(xadc, XADC_ZYNQ_REG_CTL, 0);
390 xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, ~0);
391 xadc_write_reg(xadc, XADC_ZYNQ_REG_INTMSK, xadc->zynq_intmask);
392 xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, XADC_ZYNQ_CFG_ENABLE |
393 XADC_ZYNQ_CFG_REDGE | XADC_ZYNQ_CFG_WEDGE |
394 tck_div | XADC_ZYNQ_CFG_IGAP(igap));
395
396 if (pcap_rate > XADC_ZYNQ_PCAP_RATE_MAX) {
397 ret = clk_set_rate(xadc->clk, pcap_rate);
398 if (ret)
399 return ret;
400 }
401
402 return 0;
403 }
404
405 static unsigned long xadc_zynq_get_dclk_rate(struct xadc *xadc)
406 {
407 unsigned int div;
408 uint32_t val;
409
410 xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &val);
411
412 switch (val & XADC_ZYNQ_CFG_TCKRATE_MASK) {
413 case XADC_ZYNQ_CFG_TCKRATE_DIV4:
414 div = 4;
415 break;
416 case XADC_ZYNQ_CFG_TCKRATE_DIV8:
417 div = 8;
418 break;
419 case XADC_ZYNQ_CFG_TCKRATE_DIV16:
420 div = 16;
421 break;
422 default:
423 div = 2;
424 break;
425 }
426
427 return clk_get_rate(xadc->clk) / div;
428 }
429
430 static void xadc_zynq_update_alarm(struct xadc *xadc, unsigned int alarm)
431 {
432 unsigned long flags;
433 uint32_t status;
434
435 /* Move OT to bit 7 */
436 alarm = ((alarm & 0x08) << 4) | ((alarm & 0xf0) >> 1) | (alarm & 0x07);
437
438 spin_lock_irqsave(&xadc->lock, flags);
439
440 /* Clear previous interrupts if any. */
441 xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);
442 xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, status & alarm);
443
444 xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_ALARM_MASK,
445 ~alarm & XADC_ZYNQ_INT_ALARM_MASK);
446
447 spin_unlock_irqrestore(&xadc->lock, flags);
448 }
449
450 static const struct xadc_ops xadc_zynq_ops = {
451 .read = xadc_zynq_read_adc_reg,
452 .write = xadc_zynq_write_adc_reg,
453 .setup = xadc_zynq_setup,
454 .get_dclk_rate = xadc_zynq_get_dclk_rate,
455 .interrupt_handler = xadc_zynq_interrupt_handler,
456 .update_alarm = xadc_zynq_update_alarm,
457 .type = XADC_TYPE_S7,
458 };
459
460 static const unsigned int xadc_axi_reg_offsets[] = {
461 [XADC_TYPE_S7] = XADC_7S_AXI_ADC_REG_OFFSET,
462 [XADC_TYPE_US] = XADC_US_AXI_ADC_REG_OFFSET,
463 };
464
465 static int xadc_axi_read_adc_reg(struct xadc *xadc, unsigned int reg,
466 uint16_t *val)
467 {
468 uint32_t val32;
469
470 xadc_read_reg(xadc, xadc_axi_reg_offsets[xadc->ops->type] + reg * 4,
471 &val32);
472 *val = val32 & 0xffff;
473
474 return 0;
475 }
476
477 static int xadc_axi_write_adc_reg(struct xadc *xadc, unsigned int reg,
478 uint16_t val)
479 {
480 xadc_write_reg(xadc, xadc_axi_reg_offsets[xadc->ops->type] + reg * 4,
481 val);
482
483 return 0;
484 }
485
486 static int xadc_axi_setup(struct platform_device *pdev,
487 struct iio_dev *indio_dev, int irq)
488 {
489 struct xadc *xadc = iio_priv(indio_dev);
490
491 xadc_write_reg(xadc, XADC_AXI_REG_RESET, XADC_AXI_RESET_MAGIC);
492 xadc_write_reg(xadc, XADC_AXI_REG_GIER, XADC_AXI_GIER_ENABLE);
493
494 return 0;
495 }
496
497 static irqreturn_t xadc_axi_interrupt_handler(int irq, void *devid)
498 {
499 struct iio_dev *indio_dev = devid;
500 struct xadc *xadc = iio_priv(indio_dev);
501 uint32_t status, mask;
502 unsigned int events;
503
504 xadc_read_reg(xadc, XADC_AXI_REG_IPISR, &status);
505 xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &mask);
506 status &= mask;
507
508 if (!status)
509 return IRQ_NONE;
510
511 if ((status & XADC_AXI_INT_EOS) && xadc->trigger)
512 iio_trigger_poll(xadc->trigger);
513
514 if (status & XADC_AXI_INT_ALARM_MASK) {
515 /*
516 * The order of the bits in the AXI-XADC status register does
517 * not match the order of the bits in the XADC alarm enable
518 * register. xadc_handle_events() expects the events to be in
519 * the same order as the XADC alarm enable register.
520 */
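/* OT: bit 0 -> bit 3, ALM[2:0]: bits 3:1 -> 2:0, ALM[6:3]: bits 13:10 -> 7:4 */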
521 events = (status & 0x000e) >> 1;
522 events |= (status & 0x0001) << 3;
523 events |= (status & 0x3c00) >> 6;
524 xadc_handle_events(indio_dev, events);
525 }
526
527 xadc_write_reg(xadc, XADC_AXI_REG_IPISR, status);
528
529 return IRQ_HANDLED;
530 }
531
532 static void xadc_axi_update_alarm(struct xadc *xadc, unsigned int alarm)
533 {
534 uint32_t val;
535 unsigned long flags;
536
537 /*
538 * The order of the bits in the AXI-XADC status register does not match
539 * the order of the bits in the XADC alarm enable register. We get
540 * passed the alarm mask in the same order as in the XADC alarm enable
541 * register.
542 */
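/* OT: bit 3 -> bit 0, ALM[2:0]: bits 2:0 -> 3:1, ALM[6:3]: bits 7:4 -> 13:10 */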
543 alarm = ((alarm & 0x07) << 1) | ((alarm & 0x08) >> 3) |
544 ((alarm & 0xf0) << 6);
545
546 spin_lock_irqsave(&xadc->lock, flags);
547 xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
548 val &= ~XADC_AXI_INT_ALARM_MASK;
549 val |= alarm;
550 xadc_write_reg(xadc, XADC_AXI_REG_IPIER, val);
551 spin_unlock_irqrestore(&xadc->lock, flags);
552 }
553
554 static unsigned long xadc_axi_get_dclk(struct xadc *xadc)
555 {
556 return clk_get_rate(xadc->clk);
557 }
558
559 static const struct xadc_ops xadc_7s_axi_ops = {
560 .read = xadc_axi_read_adc_reg,
561 .write = xadc_axi_write_adc_reg,
562 .setup = xadc_axi_setup,
563 .get_dclk_rate = xadc_axi_get_dclk,
564 .update_alarm = xadc_axi_update_alarm,
565 .interrupt_handler = xadc_axi_interrupt_handler,
566 .flags = XADC_FLAGS_BUFFERED | XADC_FLAGS_IRQ_OPTIONAL,
567 .type = XADC_TYPE_S7,
568 };
569
570 static const struct xadc_ops xadc_us_axi_ops = {
571 .read = xadc_axi_read_adc_reg,
572 .write = xadc_axi_write_adc_reg,
573 .setup = xadc_axi_setup,
574 .get_dclk_rate = xadc_axi_get_dclk,
575 .update_alarm = xadc_axi_update_alarm,
576 .interrupt_handler = xadc_axi_interrupt_handler,
577 .flags = XADC_FLAGS_BUFFERED | XADC_FLAGS_IRQ_OPTIONAL,
578 .type = XADC_TYPE_US,
579 };
580
581 static int _xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
582 uint16_t mask, uint16_t val)
583 {
584 uint16_t tmp;
585 int ret;
586
587 ret = _xadc_read_adc_reg(xadc, reg, &tmp);
588 if (ret)
589 return ret;
590
591 return _xadc_write_adc_reg(xadc, reg, (tmp & ~mask) | val);
592 }
593
594 static int xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
595 uint16_t mask, uint16_t val)
596 {
597 int ret;
598
599 mutex_lock(&xadc->mutex);
600 ret = _xadc_update_adc_reg(xadc, reg, mask, val);
601 mutex_unlock(&xadc->mutex);
602
603 return ret;
604 }
605
606 static unsigned long xadc_get_dclk_rate(struct xadc *xadc)
607 {
608 return xadc->ops->get_dclk_rate(xadc);
609 }
610
611 static int xadc_update_scan_mode(struct iio_dev *indio_dev,
612 const unsigned long *mask)
613 {
614 struct xadc *xadc = iio_priv(indio_dev);
615 size_t new_size, n;
616 void *data;
617
618 n = bitmap_weight(mask, indio_dev->masklength);
619
620 if (check_mul_overflow(n, sizeof(*xadc->data), &new_size))
621 return -ENOMEM;
622
623 data = devm_krealloc(indio_dev->dev.parent, xadc->data,
624 new_size, GFP_KERNEL);
625 if (!data)
626 return -ENOMEM;
627
628 memset(data, 0, new_size);
629 xadc->data = data;
630
631 return 0;
632 }
633
634 static unsigned int xadc_scan_index_to_channel(unsigned int scan_index)
635 {
636 switch (scan_index) {
637 case 5:
638 return XADC_REG_VCCPINT;
639 case 6:
640 return XADC_REG_VCCPAUX;
641 case 7:
642 return XADC_REG_VCCO_DDR;
643 case 8:
644 return XADC_REG_TEMP;
645 case 9:
646 return XADC_REG_VCCINT;
647 case 10:
648 return XADC_REG_VCCAUX;
649 case 11:
650 return XADC_REG_VPVN;
651 case 12:
652 return XADC_REG_VREFP;
653 case 13:
654 return XADC_REG_VREFN;
655 case 14:
656 return XADC_REG_VCCBRAM;
657 default:
658 return XADC_REG_VAUX(scan_index - 16);
659 }
660 }
661
662 static irqreturn_t xadc_trigger_handler(int irq, void *p)
663 {
664 struct iio_poll_func *pf = p;
665 struct iio_dev *indio_dev = pf->indio_dev;
666 struct xadc *xadc = iio_priv(indio_dev);
667 unsigned int chan;
668 int i, j;
669
670 if (!xadc->data)
671 goto out;
672
673 j = 0;
674 for_each_set_bit(i, indio_dev->active_scan_mask,
675 indio_dev->masklength) {
676 chan = xadc_scan_index_to_channel(i);
677 xadc_read_adc_reg(xadc, chan, &xadc->data[j]);
678 j++;
679 }
680
681 iio_push_to_buffers(indio_dev, xadc->data);
682
683 out:
684 iio_trigger_notify_done(indio_dev->trig);
685
686 return IRQ_HANDLED;
687 }
688
689 static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
690 {
691 struct xadc *xadc = iio_trigger_get_drvdata(trigger);
692 unsigned long flags;
693 unsigned int convst;
694 unsigned int val;
695 int ret = 0;
696
697 mutex_lock(&xadc->mutex);
698
699 if (state) {
700 /* Only one of the two triggers can be active at a time. */
701 if (xadc->trigger != NULL) {
702 ret = -EBUSY;
703 goto err_out;
704 } else {
705 xadc->trigger = trigger;
706 if (trigger == xadc->convst_trigger)
707 convst = XADC_CONF0_EC;
708 else
709 convst = 0;
710 }
711 ret = _xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF0_EC,
712 convst);
713 if (ret)
714 goto err_out;
715 } else {
716 xadc->trigger = NULL;
717 }
718
719 spin_lock_irqsave(&xadc->lock, flags);
720 xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
721 xadc_write_reg(xadc, XADC_AXI_REG_IPISR, XADC_AXI_INT_EOS);
722 if (state)
723 val |= XADC_AXI_INT_EOS;
724 else
725 val &= ~XADC_AXI_INT_EOS;
726 xadc_write_reg(xadc, XADC_AXI_REG_IPIER, val);
727 spin_unlock_irqrestore(&xadc->lock, flags);
728
729 err_out:
730 mutex_unlock(&xadc->mutex);
731
732 return ret;
733 }
734
735 static const struct iio_trigger_ops xadc_trigger_ops = {
736 .set_trigger_state = &xadc_trigger_set_state,
737 };
738
739 static struct iio_trigger *xadc_alloc_trigger(struct iio_dev *indio_dev,
740 const char *name)
741 {
742 struct device *dev = indio_dev->dev.parent;
743 struct iio_trigger *trig;
744 int ret;
745
746 trig = devm_iio_trigger_alloc(dev, "%s%d-%s", indio_dev->name,
747 iio_device_id(indio_dev), name);
748 if (trig == NULL)
749 return ERR_PTR(-ENOMEM);
750
751 trig->ops = &xadc_trigger_ops;
752 iio_trigger_set_drvdata(trig, iio_priv(indio_dev));
753
754 ret = devm_iio_trigger_register(dev, trig);
755 if (ret)
756 return ERR_PTR(ret);
757
758 return trig;
759 }
760
761 static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode)
762 {
763 uint16_t val;
764
765 /*
766 * According to the datasheet the power-down bits are don't-care on the
767 * UltraScale, but in reality setting the power-down bit for the
768 * non-existent ADC-B powers down the main ADC, so just return and don't
769 * do anything.
770 */
771 if (xadc->ops->type == XADC_TYPE_US)
772 return 0;
773
774 /* Powerdown the ADC-B when it is not needed. */
775 switch (seq_mode) {
776 case XADC_CONF1_SEQ_SIMULTANEOUS:
777 case XADC_CONF1_SEQ_INDEPENDENT:
778 val = 0;
779 break;
780 default:
781 val = XADC_CONF2_PD_ADC_B;
782 break;
783 }
784
785 return xadc_update_adc_reg(xadc, XADC_REG_CONF2, XADC_CONF2_PD_MASK,
786 val);
787 }
788
789 static int xadc_get_seq_mode(struct xadc *xadc, unsigned long scan_mode)
790 {
791 unsigned int aux_scan_mode = scan_mode >> 16;
792
793 /* UltraScale has only one ADC and supports only continuous mode */
794 if (xadc->ops->type == XADC_TYPE_US)
795 return XADC_CONF1_SEQ_CONTINUOUS;
796
797 if (xadc->external_mux_mode == XADC_EXTERNAL_MUX_DUAL)
798 return XADC_CONF1_SEQ_SIMULTANEOUS;
799
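/*
 * If only one of the two aux channel groups (VAUX[7:0] or VAUX[15:8]) is
 * enabled the default sequencer can cover it; if both groups are in use,
 * sample them simultaneously with ADC A and ADC B.
 */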
800 if ((aux_scan_mode & 0xff00) == 0 ||
801 (aux_scan_mode & 0x00ff) == 0)
802 return XADC_CONF1_SEQ_CONTINUOUS;
803
804 return XADC_CONF1_SEQ_SIMULTANEOUS;
805 }
806
807 static int xadc_postdisable(struct iio_dev *indio_dev)
808 {
809 struct xadc *xadc = iio_priv(indio_dev);
810 unsigned long scan_mask;
811 int ret;
812 int i;
813
814 scan_mask = 1; /* Run calibration as part of the sequence */
815 for (i = 0; i < indio_dev->num_channels; i++)
816 scan_mask |= BIT(indio_dev->channels[i].scan_index);
817
818 /* Enable all channels and calibration */
819 ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(0), scan_mask & 0xffff);
820 if (ret)
821 return ret;
822
823 ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
824 if (ret)
825 return ret;
826
827 ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
828 XADC_CONF1_SEQ_CONTINUOUS);
829 if (ret)
830 return ret;
831
832 return xadc_power_adc_b(xadc, XADC_CONF1_SEQ_CONTINUOUS);
833 }
834
835 static int xadc_preenable(struct iio_dev *indio_dev)
836 {
837 struct xadc *xadc = iio_priv(indio_dev);
838 unsigned long scan_mask;
839 int seq_mode;
840 int ret;
841
842 ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
843 XADC_CONF1_SEQ_DEFAULT);
844 if (ret)
845 goto err;
846
847 scan_mask = *indio_dev->active_scan_mask;
848 seq_mode = xadc_get_seq_mode(xadc, scan_mask);
849
850 ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(0), scan_mask & 0xffff);
851 if (ret)
852 goto err;
853
854 /*
855 * In simultaneous mode the upper and lower aux channels are sampled at
856 * the same time. In this mode the upper 8 bits in the sequencer
857 * register are don't care and the lower 8 bits control two channels
858 * each. As such we must set a bit if either the channel in the lower
859 * group or the one in the upper group is enabled.
860 */
861 if (seq_mode == XADC_CONF1_SEQ_SIMULTANEOUS)
862 scan_mask = ((scan_mask >> 8) | scan_mask) & 0xff0000;
863
864 ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
865 if (ret)
866 goto err;
867
868 ret = xadc_power_adc_b(xadc, seq_mode);
869 if (ret)
870 goto err;
871
872 ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
873 seq_mode);
874 if (ret)
875 goto err;
876
877 return 0;
878 err:
879 xadc_postdisable(indio_dev);
880 return ret;
881 }
882
883 static const struct iio_buffer_setup_ops xadc_buffer_ops = {
884 .preenable = &xadc_preenable,
885 .postdisable = &xadc_postdisable,
886 };
887
888 static int xadc_read_samplerate(struct xadc *xadc)
889 {
890 unsigned int div;
891 uint16_t val16;
892 int ret;
893
894 ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16);
895 if (ret)
896 return ret;
897
898 div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET;
899 if (div < 2)
900 div = 2;
901
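/* A single conversion takes 26 ADCCLK cycles (see UG480). */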
902 return xadc_get_dclk_rate(xadc) / div / 26;
903 }
904
905 static int xadc_read_raw(struct iio_dev *indio_dev,
906 struct iio_chan_spec const *chan, int *val, int *val2, long info)
907 {
908 struct xadc *xadc = iio_priv(indio_dev);
909 unsigned int bits = chan->scan_type.realbits;
910 uint16_t val16;
911 int ret;
912
913 switch (info) {
914 case IIO_CHAN_INFO_RAW:
915 if (iio_buffer_enabled(indio_dev))
916 return -EBUSY;
917 ret = xadc_read_adc_reg(xadc, chan->address, &val16);
918 if (ret < 0)
919 return ret;
920
921 val16 >>= chan->scan_type.shift;
922 if (chan->scan_type.sign == 'u')
923 *val = val16;
924 else
925 *val = sign_extend32(val16, bits - 1);
926
927 return IIO_VAL_INT;
928 case IIO_CHAN_INFO_SCALE:
929 switch (chan->type) {
930 case IIO_VOLTAGE:
931 /* V = (val * 3.0) / 2**bits */
932 switch (chan->address) {
933 case XADC_REG_VCCINT:
934 case XADC_REG_VCCAUX:
935 case XADC_REG_VREFP:
936 case XADC_REG_VREFN:
937 case XADC_REG_VCCBRAM:
938 case XADC_REG_VCCPINT:
939 case XADC_REG_VCCPAUX:
940 case XADC_REG_VCCO_DDR:
941 *val = 3000;
942 break;
943 default:
944 *val = 1000;
945 break;
946 }
947 *val2 = bits;
948 return IIO_VAL_FRACTIONAL_LOG2;
949 case IIO_TEMP:
950 /* Temp in C = (val * 503.975) / 2**bits - 273.15 */
951 *val = 503975;
952 *val2 = bits;
953 return IIO_VAL_FRACTIONAL_LOG2;
954 default:
955 return -EINVAL;
956 }
957 case IIO_CHAN_INFO_OFFSET:
958 /* Only the temperature channel has an offset */
959 *val = -((273150 << bits) / 503975);
960 return IIO_VAL_INT;
961 case IIO_CHAN_INFO_SAMP_FREQ:
962 ret = xadc_read_samplerate(xadc);
963 if (ret < 0)
964 return ret;
965
966 *val = ret;
967 return IIO_VAL_INT;
968 default:
969 return -EINVAL;
970 }
971 }
972
973 static int xadc_write_samplerate(struct xadc *xadc, int val)
974 {
975 unsigned long clk_rate = xadc_get_dclk_rate(xadc);
976 unsigned int div;
977
978 if (!clk_rate)
979 return -EINVAL;
980
981 if (val <= 0)
982 return -EINVAL;
983
984 /* Max. 150 kSPS */
985 if (val > XADC_MAX_SAMPLERATE)
986 val = XADC_MAX_SAMPLERATE;
987
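/*
 * Each conversion takes 26 ADCCLK cycles, so convert the requested
 * samplerate into the required ADCCLK rate.
 */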
988 val *= 26;
989
990 /* Min 1MHz */
991 if (val < 1000000)
992 val = 1000000;
993
994 /*
995 * We want to round down, but only if we do not exceed the 150 kSPS
996 * limit.
997 */
998 div = clk_rate / val;
999 if (clk_rate / div / 26 > XADC_MAX_SAMPLERATE)
1000 div++;
1001 if (div < 2)
1002 div = 2;
1003 else if (div > 0xff)
1004 div = 0xff;
1005
1006 return xadc_update_adc_reg(xadc, XADC_REG_CONF2, XADC_CONF2_DIV_MASK,
1007 div << XADC_CONF2_DIV_OFFSET);
1008 }
1009
1010 static int xadc_write_raw(struct iio_dev *indio_dev,
1011 struct iio_chan_spec const *chan, int val, int val2, long info)
1012 {
1013 struct xadc *xadc = iio_priv(indio_dev);
1014
1015 if (info != IIO_CHAN_INFO_SAMP_FREQ)
1016 return -EINVAL;
1017
1018 return xadc_write_samplerate(xadc, val);
1019 }
1020
1021 static const struct iio_event_spec xadc_temp_events[] = {
1022 {
1023 .type = IIO_EV_TYPE_THRESH,
1024 .dir = IIO_EV_DIR_RISING,
1025 .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
1026 BIT(IIO_EV_INFO_VALUE) |
1027 BIT(IIO_EV_INFO_HYSTERESIS),
1028 },
1029 };
1030
1031 /* Separate values for upper and lower thresholds, but only a shared enable */
1032 static const struct iio_event_spec xadc_voltage_events[] = {
1033 {
1034 .type = IIO_EV_TYPE_THRESH,
1035 .dir = IIO_EV_DIR_RISING,
1036 .mask_separate = BIT(IIO_EV_INFO_VALUE),
1037 }, {
1038 .type = IIO_EV_TYPE_THRESH,
1039 .dir = IIO_EV_DIR_FALLING,
1040 .mask_separate = BIT(IIO_EV_INFO_VALUE),
1041 }, {
1042 .type = IIO_EV_TYPE_THRESH,
1043 .dir = IIO_EV_DIR_EITHER,
1044 .mask_separate = BIT(IIO_EV_INFO_ENABLE),
1045 },
1046 };
1047
1048 #define XADC_CHAN_TEMP(_chan, _scan_index, _addr, _bits) { \
1049 .type = IIO_TEMP, \
1050 .indexed = 1, \
1051 .channel = (_chan), \
1052 .address = (_addr), \
1053 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
1054 BIT(IIO_CHAN_INFO_SCALE) | \
1055 BIT(IIO_CHAN_INFO_OFFSET), \
1056 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
1057 .event_spec = xadc_temp_events, \
1058 .num_event_specs = ARRAY_SIZE(xadc_temp_events), \
1059 .scan_index = (_scan_index), \
1060 .scan_type = { \
1061 .sign = 'u', \
1062 .realbits = (_bits), \
1063 .storagebits = 16, \
1064 .shift = 16 - (_bits), \
1065 .endianness = IIO_CPU, \
1066 }, \
1067 }
1068
1069 #define XADC_CHAN_VOLTAGE(_chan, _scan_index, _addr, _bits, _ext, _alarm) { \
1070 .type = IIO_VOLTAGE, \
1071 .indexed = 1, \
1072 .channel = (_chan), \
1073 .address = (_addr), \
1074 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
1075 BIT(IIO_CHAN_INFO_SCALE), \
1076 .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
1077 .event_spec = (_alarm) ? xadc_voltage_events : NULL, \
1078 .num_event_specs = (_alarm) ? ARRAY_SIZE(xadc_voltage_events) : 0, \
1079 .scan_index = (_scan_index), \
1080 .scan_type = { \
1081 .sign = ((_addr) == XADC_REG_VREFN) ? 's' : 'u', \
1082 .realbits = (_bits), \
1083 .storagebits = 16, \
1084 .shift = 16 - (_bits), \
1085 .endianness = IIO_CPU, \
1086 }, \
1087 .extend_name = _ext, \
1088 }
1089
1090 /* 7 Series */
1091 #define XADC_7S_CHAN_TEMP(_chan, _scan_index, _addr) \
1092 XADC_CHAN_TEMP(_chan, _scan_index, _addr, 12)
1093 #define XADC_7S_CHAN_VOLTAGE(_chan, _scan_index, _addr, _ext, _alarm) \
1094 XADC_CHAN_VOLTAGE(_chan, _scan_index, _addr, 12, _ext, _alarm)
1095
1096 static const struct iio_chan_spec xadc_7s_channels[] = {
1097 XADC_7S_CHAN_TEMP(0, 8, XADC_REG_TEMP),
1098 XADC_7S_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true),
1099 XADC_7S_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true),
1100 XADC_7S_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true),
1101 XADC_7S_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpint", true),
1102 XADC_7S_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpaux", true),
1103 XADC_7S_CHAN_VOLTAGE(5, 7, XADC_REG_VCCO_DDR, "vccoddr", true),
1104 XADC_7S_CHAN_VOLTAGE(6, 12, XADC_REG_VREFP, "vrefp", false),
1105 XADC_7S_CHAN_VOLTAGE(7, 13, XADC_REG_VREFN, "vrefn", false),
1106 XADC_7S_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, NULL, false),
1107 XADC_7S_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), NULL, false),
1108 XADC_7S_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), NULL, false),
1109 XADC_7S_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), NULL, false),
1110 XADC_7S_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), NULL, false),
1111 XADC_7S_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), NULL, false),
1112 XADC_7S_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), NULL, false),
1113 XADC_7S_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), NULL, false),
1114 XADC_7S_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), NULL, false),
1115 XADC_7S_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), NULL, false),
1116 XADC_7S_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), NULL, false),
1117 XADC_7S_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), NULL, false),
1118 XADC_7S_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), NULL, false),
1119 XADC_7S_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), NULL, false),
1120 XADC_7S_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), NULL, false),
1121 XADC_7S_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), NULL, false),
1122 XADC_7S_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), NULL, false),
1123 };
1124
1125 /* UltraScale */
1126 #define XADC_US_CHAN_TEMP(_chan, _scan_index, _addr) \
1127 XADC_CHAN_TEMP(_chan, _scan_index, _addr, 10)
1128 #define XADC_US_CHAN_VOLTAGE(_chan, _scan_index, _addr, _ext, _alarm) \
1129 XADC_CHAN_VOLTAGE(_chan, _scan_index, _addr, 10, _ext, _alarm)
1130
1131 static const struct iio_chan_spec xadc_us_channels[] = {
1132 XADC_US_CHAN_TEMP(0, 8, XADC_REG_TEMP),
1133 XADC_US_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true),
1134 XADC_US_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true),
1135 XADC_US_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true),
1136 XADC_US_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpsintlp", true),
1137 XADC_US_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpsintfp", true),
1138 XADC_US_CHAN_VOLTAGE(5, 7, XADC_REG_VCCO_DDR, "vccpsaux", true),
1139 XADC_US_CHAN_VOLTAGE(6, 12, XADC_REG_VREFP, "vrefp", false),
1140 XADC_US_CHAN_VOLTAGE(7, 13, XADC_REG_VREFN, "vrefn", false),
1141 XADC_US_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, NULL, false),
1142 XADC_US_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), NULL, false),
1143 XADC_US_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), NULL, false),
1144 XADC_US_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), NULL, false),
1145 XADC_US_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), NULL, false),
1146 XADC_US_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), NULL, false),
1147 XADC_US_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), NULL, false),
1148 XADC_US_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), NULL, false),
1149 XADC_US_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), NULL, false),
1150 XADC_US_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), NULL, false),
1151 XADC_US_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), NULL, false),
1152 XADC_US_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), NULL, false),
1153 XADC_US_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), NULL, false),
1154 XADC_US_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), NULL, false),
1155 XADC_US_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), NULL, false),
1156 XADC_US_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), NULL, false),
1157 XADC_US_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), NULL, false),
1158 };
1159
1160 static const struct iio_info xadc_info = {
1161 .read_raw = &xadc_read_raw,
1162 .write_raw = &xadc_write_raw,
1163 .read_event_config = &xadc_read_event_config,
1164 .write_event_config = &xadc_write_event_config,
1165 .read_event_value = &xadc_read_event_value,
1166 .write_event_value = &xadc_write_event_value,
1167 .update_scan_mode = &xadc_update_scan_mode,
1168 };
1169
1170 static const struct of_device_id xadc_of_match_table[] = {
1171 {
1172 .compatible = "xlnx,zynq-xadc-1.00.a",
1173 .data = &xadc_zynq_ops
1174 }, {
1175 .compatible = "xlnx,axi-xadc-1.00.a",
1176 .data = &xadc_7s_axi_ops
1177 }, {
1178 .compatible = "xlnx,system-management-wiz-1.3",
1179 .data = &xadc_us_axi_ops
1180 },
1181 { },
1182 };
1183 MODULE_DEVICE_TABLE(of, xadc_of_match_table);
1184
1185 static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
1186 unsigned int *conf, int irq)
1187 {
1188 struct device *dev = indio_dev->dev.parent;
1189 struct xadc *xadc = iio_priv(indio_dev);
1190 const struct iio_chan_spec *channel_templates;
1191 struct iio_chan_spec *channels, *chan;
1192 struct device_node *chan_node, *child;
1193 unsigned int max_channels;
1194 unsigned int num_channels;
1195 const char *external_mux;
1196 u32 ext_mux_chan;
1197 u32 reg;
1198 int ret;
1199 int i;
1200
1201 *conf = 0;
1202
1203 ret = of_property_read_string(np, "xlnx,external-mux", &external_mux);
1204 if (ret < 0 || strcasecmp(external_mux, "none") == 0)
1205 xadc->external_mux_mode = XADC_EXTERNAL_MUX_NONE;
1206 else if (strcasecmp(external_mux, "single") == 0)
1207 xadc->external_mux_mode = XADC_EXTERNAL_MUX_SINGLE;
1208 else if (strcasecmp(external_mux, "dual") == 0)
1209 xadc->external_mux_mode = XADC_EXTERNAL_MUX_DUAL;
1210 else
1211 return -EINVAL;
1212
1213 if (xadc->external_mux_mode != XADC_EXTERNAL_MUX_NONE) {
1214 ret = of_property_read_u32(np, "xlnx,external-mux-channel",
1215 &ext_mux_chan);
1216 if (ret < 0)
1217 return ret;
1218
1219 if (xadc->external_mux_mode == XADC_EXTERNAL_MUX_SINGLE) {
1220 if (ext_mux_chan == 0)
1221 ext_mux_chan = XADC_REG_VPVN;
1222 else if (ext_mux_chan <= 16)
1223 ext_mux_chan = XADC_REG_VAUX(ext_mux_chan - 1);
1224 else
1225 return -EINVAL;
1226 } else {
1227 if (ext_mux_chan > 0 && ext_mux_chan <= 8)
1228 ext_mux_chan = XADC_REG_VAUX(ext_mux_chan - 1);
1229 else
1230 return -EINVAL;
1231 }
1232
1233 *conf |= XADC_CONF0_MUX | XADC_CONF0_CHAN(ext_mux_chan);
1234 }
1235 if (xadc->ops->type == XADC_TYPE_S7) {
1236 channel_templates = xadc_7s_channels;
1237 max_channels = ARRAY_SIZE(xadc_7s_channels);
1238 } else {
1239 channel_templates = xadc_us_channels;
1240 max_channels = ARRAY_SIZE(xadc_us_channels);
1241 }
1242 channels = devm_kmemdup(dev, channel_templates,
1243 sizeof(channels[0]) * max_channels, GFP_KERNEL);
1244 if (!channels)
1245 return -ENOMEM;
1246
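/*
 * The first 9 channels of the template (temperature plus the internal
 * supply and reference voltages) are always exposed; VP/VN and the VAUX
 * channels are only added if they are listed in the devicetree below.
 */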
1247 num_channels = 9;
1248 chan = &channels[9];
1249
1250 chan_node = of_get_child_by_name(np, "xlnx,channels");
1251 if (chan_node) {
1252 for_each_child_of_node(chan_node, child) {
1253 if (num_channels >= max_channels) {
1254 of_node_put(child);
1255 break;
1256 }
1257
1258 ret = of_property_read_u32(child, "reg", &reg);
1259 if (ret || reg > 16)
1260 continue;
1261
1262 if (of_property_read_bool(child, "xlnx,bipolar"))
1263 chan->scan_type.sign = 's';
1264
1265 if (reg == 0) {
1266 chan->scan_index = 11;
1267 chan->address = XADC_REG_VPVN;
1268 } else {
1269 chan->scan_index = 15 + reg;
1270 chan->address = XADC_REG_VAUX(reg - 1);
1271 }
1272 num_channels++;
1273 chan++;
1274 }
1275 }
1276 of_node_put(chan_node);
1277
1278 /* No IRQ => no events */
1279 if (irq <= 0) {
1280 for (i = 0; i < num_channels; i++) {
1281 channels[i].event_spec = NULL;
1282 channels[i].num_event_specs = 0;
1283 }
1284 }
1285
1286 indio_dev->num_channels = num_channels;
1287 indio_dev->channels = devm_krealloc(dev, channels,
1288 sizeof(*channels) * num_channels,
1289 GFP_KERNEL);
1290 /* If we can't resize the channels array, just use the original */
1291 if (!indio_dev->channels)
1292 indio_dev->channels = channels;
1293
1294 return 0;
1295 }
1296
1297 static const char * const xadc_type_names[] = {
1298 [XADC_TYPE_S7] = "xadc",
1299 [XADC_TYPE_US] = "xilinx-system-monitor",
1300 };
1301
1302 static void xadc_clk_disable_unprepare(void *data)
1303 {
1304 struct clk *clk = data;
1305
1306 clk_disable_unprepare(clk);
1307 }
1308
1309 static void xadc_cancel_delayed_work(void *data)
1310 {
1311 struct delayed_work *work = data;
1312
1313 cancel_delayed_work_sync(work);
1314 }
1315
1316 static int xadc_probe(struct platform_device *pdev)
1317 {
1318 struct device *dev = &pdev->dev;
1319 const struct of_device_id *id;
1320 const struct xadc_ops *ops;
1321 struct iio_dev *indio_dev;
1322 unsigned int bipolar_mask;
1323 unsigned int conf0;
1324 struct xadc *xadc;
1325 int ret;
1326 int irq;
1327 int i;
1328
1329 if (!dev->of_node)
1330 return -ENODEV;
1331
1332 id = of_match_node(xadc_of_match_table, dev->of_node);
1333 if (!id)
1334 return -EINVAL;
1335
1336 ops = id->data;
1337
1338 irq = platform_get_irq_optional(pdev, 0);
1339 if (irq < 0 &&
1340 (irq != -ENXIO || !(ops->flags & XADC_FLAGS_IRQ_OPTIONAL)))
1341 return irq;
1342
1343 indio_dev = devm_iio_device_alloc(dev, sizeof(*xadc));
1344 if (!indio_dev)
1345 return -ENOMEM;
1346
1347 xadc = iio_priv(indio_dev);
1348 xadc->ops = id->data;
1349 init_completion(&xadc->completion);
1350 mutex_init(&xadc->mutex);
1351 spin_lock_init(&xadc->lock);
1352 INIT_DELAYED_WORK(&xadc->zynq_unmask_work, xadc_zynq_unmask_worker);
1353
1354 xadc->base = devm_platform_ioremap_resource(pdev, 0);
1355 if (IS_ERR(xadc->base))
1356 return PTR_ERR(xadc->base);
1357
1358 indio_dev->name = xadc_type_names[xadc->ops->type];
1359 indio_dev->modes = INDIO_DIRECT_MODE;
1360 indio_dev->info = &xadc_info;
1361
1362 ret = xadc_parse_dt(indio_dev, dev->of_node, &conf0, irq);
1363 if (ret)
1364 return ret;
1365
1366 if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
1367 ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
1368 &iio_pollfunc_store_time,
1369 &xadc_trigger_handler,
1370 &xadc_buffer_ops);
1371 if (ret)
1372 return ret;
1373
1374 if (irq > 0) {
1375 xadc->convst_trigger = xadc_alloc_trigger(indio_dev, "convst");
1376 if (IS_ERR(xadc->convst_trigger))
1377 return PTR_ERR(xadc->convst_trigger);
1378
1379 xadc->samplerate_trigger = xadc_alloc_trigger(indio_dev,
1380 "samplerate");
1381 if (IS_ERR(xadc->samplerate_trigger))
1382 return PTR_ERR(xadc->samplerate_trigger);
1383 }
1384 }
1385
1386 xadc->clk = devm_clk_get(dev, NULL);
1387 if (IS_ERR(xadc->clk))
1388 return PTR_ERR(xadc->clk);
1389
1390 ret = clk_prepare_enable(xadc->clk);
1391 if (ret)
1392 return ret;
1393
1394 ret = devm_add_action_or_reset(dev,
1395 xadc_clk_disable_unprepare, xadc->clk);
1396 if (ret)
1397 return ret;
1398
1399 /*
1400 * Make sure not to exceed the maximum samplerate since otherwise the
1401 * resulting interrupt storm will soft-lock the system.
1402 */
1403 if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
1404 ret = xadc_read_samplerate(xadc);
1405 if (ret < 0)
1406 return ret;
1407
1408 if (ret > XADC_MAX_SAMPLERATE) {
1409 ret = xadc_write_samplerate(xadc, XADC_MAX_SAMPLERATE);
1410 if (ret < 0)
1411 return ret;
1412 }
1413 }
1414
1415 if (irq > 0) {
1416 ret = devm_request_irq(dev, irq, xadc->ops->interrupt_handler,
1417 0, dev_name(dev), indio_dev);
1418 if (ret)
1419 return ret;
1420
1421 ret = devm_add_action_or_reset(dev, xadc_cancel_delayed_work,
1422 &xadc->zynq_unmask_work);
1423 if (ret)
1424 return ret;
1425 }
1426
1427 ret = xadc->ops->setup(pdev, indio_dev, irq);
1428 if (ret)
1429 return ret;
1430
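/*
 * Read back the thresholds currently programmed in the hardware; they can
 * later be changed through the IIO event value interface.
 */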
1431 for (i = 0; i < 16; i++)
1432 xadc_read_adc_reg(xadc, XADC_REG_THRESHOLD(i),
1433 &xadc->threshold[i]);
1434
1435 ret = xadc_write_adc_reg(xadc, XADC_REG_CONF0, conf0);
1436 if (ret)
1437 return ret;
1438
1439 bipolar_mask = 0;
1440 for (i = 0; i < indio_dev->num_channels; i++) {
1441 if (indio_dev->channels[i].scan_type.sign == 's')
1442 bipolar_mask |= BIT(indio_dev->channels[i].scan_index);
1443 }
1444
1445 ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(0), bipolar_mask);
1446 if (ret)
1447 return ret;
1448
1449 ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(1),
1450 bipolar_mask >> 16);
1451 if (ret)
1452 return ret;
1453
1454 /* Disable all alarms */
1455 ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
1456 XADC_CONF1_ALARM_MASK);
1457 if (ret)
1458 return ret;
1459
1460 /* Set thresholds to min/max */
1461 for (i = 0; i < 16; i++) {
1462 /*
1463 * Set max voltage threshold and both temperature thresholds to
1464 * 0xffff, min voltage threshold to 0.
1465 */
1466 if (i % 8 < 4 || i == 7)
1467 xadc->threshold[i] = 0xffff;
1468 else
1469 xadc->threshold[i] = 0;
1470 ret = xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(i),
1471 xadc->threshold[i]);
1472 if (ret)
1473 return ret;
1474 }
1475
1476 /* Go to non-buffered mode */
1477 xadc_postdisable(indio_dev);
1478
1479 return devm_iio_device_register(dev, indio_dev);
1480 }
1481
1482 static struct platform_driver xadc_driver = {
1483 .probe = xadc_probe,
1484 .driver = {
1485 .name = "xadc",
1486 .of_match_table = xadc_of_match_table,
1487 },
1488 };
1489 module_platform_driver(xadc_driver);
1490
1491 MODULE_LICENSE("GPL v2");
1492 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
1493 MODULE_DESCRIPTION("Xilinx XADC IIO driver");
1494