1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) ST-Ericsson SA 2010
4  * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
5  * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
6  * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
7  * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
8  * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
9  * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
10  */
11 
12 #include <linux/clk.h>
13 #include <linux/completion.h>
14 #include <linux/device.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/dmaengine.h>
17 #include <linux/err.h>
18 #include <linux/errno.h>
19 #include <linux/interrupt.h>
20 #include <linux/io.h>
21 #include <linux/irqreturn.h>
22 #include <linux/kernel.h>
23 #include <linux/klist.h>
24 #include <linux/module.h>
25 #include <linux/mod_devicetable.h>
26 #include <linux/platform_device.h>
27 #include <linux/regulator/consumer.h>
28 #include <linux/semaphore.h>
29 #include <linux/platform_data/dma-ste-dma40.h>
30 
31 #include <crypto/aes.h>
32 #include <crypto/ctr.h>
33 #include <crypto/internal/des.h>
34 #include <crypto/internal/skcipher.h>
35 #include <crypto/scatterwalk.h>
36 
37 #include <linux/platform_data/crypto-ux500.h>
38 
39 #include "cryp_p.h"
40 #include "cryp.h"
41 
42 #define CRYP_MAX_KEY_SIZE	32
43 #define BYTES_PER_WORD		4
44 
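/*
 * Data transfer mode for the driver, exposed as a module parameter (see
 * module_param() at the bottom of this file). It is compared against
 * CRYP_MODE_POLLING, CRYP_MODE_INTERRUPT and CRYP_MODE_DMA below; the
 * default of 0 selects polling mode, assuming the usual enum ordering
 * in cryp.h.
 */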
45 static int cryp_mode;
46 static atomic_t session_id;
47 
48 static struct stedma40_chan_cfg *mem_to_engine;
49 static struct stedma40_chan_cfg *engine_to_mem;
50 
51 /**
52  * struct cryp_driver_data - data specific to the driver.
53  *
54  * @device_list: A list of registered devices to choose from.
55  * @device_allocation: A semaphore initialized with number of devices.
56  */
57 struct cryp_driver_data {
58 	struct klist device_list;
59 	struct semaphore device_allocation;
60 };
61 
62 /**
63  * struct cryp_ctx - Crypto context
64  * @config: Crypto mode.
65  * @key: Key array.
66  * @keylen: Length of key.
67  * @iv: Pointer to initialization vector.
68  * @indata: Pointer to indata.
69  * @outdata: Pointer to outdata.
70  * @datalen: Length of indata.
71  * @outlen: Length of outdata.
72  * @blocksize: Size of blocks.
73  * @updated: Updated flag.
74  * @dev_ctx: Device dependent context.
75  * @device: Pointer to the device.
76  * @session_id: Atomic session ID.
77  */
78 struct cryp_ctx {
79 	struct cryp_config config;
80 	u8 key[CRYP_MAX_KEY_SIZE];
81 	u32 keylen;
82 	u8 *iv;
83 	const u8 *indata;
84 	u8 *outdata;
85 	u32 datalen;
86 	u32 outlen;
87 	u32 blocksize;
88 	u8 updated;
89 	struct cryp_device_context dev_ctx;
90 	struct cryp_device_data *device;
91 	u32 session_id;
92 };
93 
94 static struct cryp_driver_data driver_data;
95 
96 /**
97  * swap_bits_in_byte - mirror the bits in a byte
98  * @b: the byte to be mirrored
99  *
100  * The bits are swapped the following way:
101  *  Byte b includes bits 0-7; nibble 1 (n1) includes bits 0-3 and
102  *  nibble 2 (n2) bits 4-7.
103  *
104  *  Nibble 1 (n1):
105  *  (The "old" (moved) bit is replaced with a zero)
106  *  1. Move bit 6 and 7, 4 positions to the right.
107  *  2. Move bit 3 and 5, 2 positions to the right.
108  *  3. Move bit 1-4, 1 position to the right.
109  *
110  *  Nibble 2 (n2):
111  *  1. Move bit 0 and 1, 4 positions to the left.
112  *  2. Move bit 2 and 4, 2 positions to the left.
113  *  3. Move bit 3-6, 1 position to the left.
114  *
115  *  Combine the two nibbles to a complete and swapped byte.
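 *
 *  Example: 0xB2 (1011 0010b) is mirrored into 0x4D (0100 1101b).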
116  */
117 
118 static inline u8 swap_bits_in_byte(u8 b)
119 {
120 #define R_SHIFT_4_MASK  0xc0 /* Bits 6 and 7, right shift 4 */
121 #define R_SHIFT_2_MASK  0x28 /* (After right shift 4) Bits 3 and 5,
122 				  right shift 2 */
123 #define R_SHIFT_1_MASK  0x1e /* (After right shift 2) Bits 1-4,
124 				  right shift 1 */
125 #define L_SHIFT_4_MASK  0x03 /* Bits 0 and 1, left shift 4 */
126 #define L_SHIFT_2_MASK  0x14 /* (After left shift 4) Bits 2 and 4,
127 				  left shift 2 */
128 #define L_SHIFT_1_MASK  0x78 /* (After left shift 2) Bits 3-6,
129 				  left shift 1 */
130 
131 	u8 n1;
132 	u8 n2;
133 
134 	/* Swap most significant nibble */
135 	/* Right shift 4, bits 6 and 7 */
136 	n1 = ((b  & R_SHIFT_4_MASK) >> 4) | (b  & ~(R_SHIFT_4_MASK >> 4));
137 	/* Right shift 2, bits 3 and 5 */
138 	n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2));
139 	/* Right shift 1, bits 1-4 */
140 	n1 = (n1  & R_SHIFT_1_MASK) >> 1;
141 
142 	/* Swap least significant nibble */
143 	/* Left shift 4, bits 0 and 1 */
144 	n2 = ((b  & L_SHIFT_4_MASK) << 4) | (b  & ~(L_SHIFT_4_MASK << 4));
145 	/* Left shift 2, bits 2 and 4 */
146 	n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2));
147 	/* Left shift 1, bits 3-6 */
148 	n2 = (n2  & L_SHIFT_1_MASK) << 1;
149 
150 	return n1 | n2;
151 }
152 
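/*
 * Used by cfg_keys() for AES keys: the key is copied with its 32-bit words
 * in reverse order (input word 0 ends up as the last output word) and with
 * every byte bit-mirrored by swap_bits_in_byte(), presumably the layout the
 * CRYP key registers expect for AES.
 */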
153 static inline void swap_words_in_key_and_bits_in_byte(const u8 *in,
154 						      u8 *out, u32 len)
155 {
156 	unsigned int i = 0;
157 	int j;
158 	int index = 0;
159 
160 	j = len - BYTES_PER_WORD;
161 	while (j >= 0) {
162 		for (i = 0; i < BYTES_PER_WORD; i++) {
163 			index = len - j - BYTES_PER_WORD + i;
164 			out[j + i] =
165 				swap_bits_in_byte(in[index]);
166 		}
167 		j -= BYTES_PER_WORD;
168 	}
169 }
170 
171 static void add_session_id(struct cryp_ctx *ctx)
172 {
173 	/*
174 	 * We never want 0 to be a valid value, since this is the default value
175 	 * for the software context.
176 	 */
177 	if (unlikely(atomic_inc_and_test(&session_id)))
178 		atomic_inc(&session_id);
179 
180 	ctx->session_id = atomic_read(&session_id);
181 }
182 
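/*
 * Interrupt mode service routine: each input-FIFO interrupt feeds one block
 * of indata to the engine and each output-FIFO interrupt drains one block
 * into outdata. The output interrupt source is disabled once outlen reaches
 * zero, which ends the busy-wait loop in hw_crypt_noxts().
 */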
183 static irqreturn_t cryp_interrupt_handler(int irq, void *param)
184 {
185 	struct cryp_ctx *ctx;
186 	int count;
187 	struct cryp_device_data *device_data;
188 
189 	if (param == NULL) {
190 		BUG_ON(!param);
191 		return IRQ_HANDLED;
192 	}
193 
194 	/* The device is coming from the one found in hw_crypt_noxts. */
195 	device_data = (struct cryp_device_data *)param;
196 
197 	ctx = device_data->current_ctx;
198 
199 	if (ctx == NULL) {
200 		BUG_ON(!ctx);
201 		return IRQ_HANDLED;
202 	}
203 
204 	dev_dbg(ctx->device->dev, "[%s] (len: %d) %s, ", __func__, ctx->outlen,
205 		cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO) ?
206 		"out" : "in");
207 
208 	if (cryp_pending_irq_src(device_data,
209 				 CRYP_IRQ_SRC_OUTPUT_FIFO)) {
210 		if (ctx->outlen / ctx->blocksize > 0) {
211 			count = ctx->blocksize / 4;
212 
213 			readsl(&device_data->base->dout, ctx->outdata, count);
214 			ctx->outdata += count;
215 			ctx->outlen -= count;
216 
217 			if (ctx->outlen == 0) {
218 				cryp_disable_irq_src(device_data,
219 						     CRYP_IRQ_SRC_OUTPUT_FIFO);
220 			}
221 		}
222 	} else if (cryp_pending_irq_src(device_data,
223 					CRYP_IRQ_SRC_INPUT_FIFO)) {
224 		if (ctx->datalen / ctx->blocksize > 0) {
225 			count = ctx->blocksize / 4;
226 
227 			writesl(&device_data->base->din, ctx->indata, count);
228 
229 			ctx->indata += count;
230 			ctx->datalen -= count;
231 
232 			if (ctx->datalen == 0)
233 				cryp_disable_irq_src(device_data,
234 						   CRYP_IRQ_SRC_INPUT_FIFO);
235 
236 			if (ctx->config.algomode == CRYP_ALGO_AES_XTS) {
237 				CRYP_PUT_BITS(&device_data->base->cr,
238 					      CRYP_START_ENABLE,
239 					      CRYP_CR_START_POS,
240 					      CRYP_CR_START_MASK);
241 
242 				cryp_wait_until_done(device_data);
243 			}
244 		}
245 	}
246 
247 	return IRQ_HANDLED;
248 }
249 
250 static int mode_is_aes(enum cryp_algo_mode mode)
251 {
252 	return	CRYP_ALGO_AES_ECB == mode ||
253 		CRYP_ALGO_AES_CBC == mode ||
254 		CRYP_ALGO_AES_CTR == mode ||
255 		CRYP_ALGO_AES_XTS == mode;
256 }
257 
258 static int cfg_iv(struct cryp_device_data *device_data, u32 left, u32 right,
259 		  enum cryp_init_vector_index index)
260 {
261 	struct cryp_init_vector_value vector_value;
262 
263 	dev_dbg(device_data->dev, "[%s]", __func__);
264 
265 	vector_value.init_value_left = left;
266 	vector_value.init_value_right = right;
267 
268 	return cryp_configure_init_vector(device_data,
269 					  index,
270 					  vector_value);
271 }
272 
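/*
 * Each init-vector register index holds 64 bits as a left/right pair of
 * 32-bit words, so an AES block needs two register pairs and a DES block
 * one; hence num_of_regs = blocksize / 8 below.
 */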
273 static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx)
274 {
275 	int i;
276 	int status = 0;
277 	int num_of_regs = ctx->blocksize / 8;
278 	__be32 *civ = (__be32 *)ctx->iv;
279 	u32 iv[AES_BLOCK_SIZE / 4];
280 
281 	dev_dbg(device_data->dev, "[%s]", __func__);
282 
283 	/*
284 	 * Since we loop on num_of_regs we need to have a check in case
285 	 * someone provides an incorrect blocksize which would force calling
286 	 * cfg_iv with i greater than 2 which is an error.
287 	 */
288 	if (num_of_regs > 2) {
289 		dev_err(device_data->dev, "[%s] Incorrect blocksize %d",
290 			__func__, ctx->blocksize);
291 		return -EINVAL;
292 	}
293 
294 	for (i = 0; i < ctx->blocksize / 4; i++)
295 		iv[i] = be32_to_cpup(civ + i);
296 
297 	for (i = 0; i < num_of_regs; i++) {
298 		status = cfg_iv(device_data, iv[i*2], iv[i*2+1],
299 				(enum cryp_init_vector_index) i);
300 		if (status != 0)
301 			return status;
302 	}
303 	return status;
304 }
305 
306 static int set_key(struct cryp_device_data *device_data,
307 		   u32 left_key,
308 		   u32 right_key,
309 		   enum cryp_key_reg_index index)
310 {
311 	struct cryp_key_value key_value;
312 	int cryp_error;
313 
314 	dev_dbg(device_data->dev, "[%s]", __func__);
315 
316 	key_value.key_value_left = left_key;
317 	key_value.key_value_right = right_key;
318 
319 	cryp_error = cryp_configure_key_values(device_data,
320 					       index,
321 					       key_value);
322 	if (cryp_error != 0)
323 		dev_err(device_data->dev, "[%s]: "
324 			"cryp_configure_key_values() failed!", __func__);
325 
326 	return cryp_error;
327 }
328 
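/*
 * Load the key into the CRYP key registers, 64 bits (one left/right pair)
 * per register index. AES keys are word- and bit-swapped first, while DES
 * and 3DES keys are only converted from big-endian words.
 */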
329 static int cfg_keys(struct cryp_ctx *ctx)
330 {
331 	int i;
332 	int num_of_regs = ctx->keylen / 8;
333 	u32 swapped_key[CRYP_MAX_KEY_SIZE / 4];
334 	__be32 *ckey = (__be32 *)ctx->key;
335 	int cryp_error = 0;
336 
337 	dev_dbg(ctx->device->dev, "[%s]", __func__);
338 
339 	if (mode_is_aes(ctx->config.algomode)) {
340 		swap_words_in_key_and_bits_in_byte((u8 *)ckey,
341 						   (u8 *)swapped_key,
342 						   ctx->keylen);
343 	} else {
344 		for (i = 0; i < ctx->keylen / 4; i++)
345 			swapped_key[i] = be32_to_cpup(ckey + i);
346 	}
347 
348 	for (i = 0; i < num_of_regs; i++) {
349 		cryp_error = set_key(ctx->device,
350 				     swapped_key[i * 2],
351 				     swapped_key[i * 2 + 1],
352 				     (enum cryp_key_reg_index) i);
353 
354 		if (cryp_error != 0) {
355 			dev_err(ctx->device->dev, "[%s]: set_key() failed!",
356 					__func__);
357 			return cryp_error;
358 		}
359 	}
360 	return cryp_error;
361 }
362 
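/*
 * Program the hardware for this request. Three cases are handled: a fresh
 * context (keys and IVs are written and a new session id is taken), a
 * context whose session id no longer matches the hardware (the saved device
 * context is restored), and the common case where the hardware already
 * holds this session and only CRYPEN needs to be set.
 */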
363 static int cryp_setup_context(struct cryp_ctx *ctx,
364 			      struct cryp_device_data *device_data)
365 {
366 	u32 control_register = CRYP_CR_DEFAULT;
367 
368 	switch (cryp_mode) {
369 	case CRYP_MODE_INTERRUPT:
370 		writel_relaxed(CRYP_IMSC_DEFAULT, &device_data->base->imsc);
371 		break;
372 
373 	case CRYP_MODE_DMA:
374 		writel_relaxed(CRYP_DMACR_DEFAULT, &device_data->base->dmacr);
375 		break;
376 
377 	default:
378 		break;
379 	}
380 
381 	if (ctx->updated == 0) {
382 		cryp_flush_inoutfifo(device_data);
383 		if (cfg_keys(ctx) != 0) {
384 			dev_err(ctx->device->dev, "[%s]: cfg_keys failed!",
385 				__func__);
386 			return -EINVAL;
387 		}
388 
389 		if (ctx->iv &&
390 		    CRYP_ALGO_AES_ECB != ctx->config.algomode &&
391 		    CRYP_ALGO_DES_ECB != ctx->config.algomode &&
392 		    CRYP_ALGO_TDES_ECB != ctx->config.algomode) {
393 			if (cfg_ivs(device_data, ctx) != 0)
394 				return -EPERM;
395 		}
396 
397 		cryp_set_configuration(device_data, &ctx->config,
398 				       &control_register);
399 		add_session_id(ctx);
400 	} else if (ctx->updated == 1 &&
401 		   ctx->session_id != atomic_read(&session_id)) {
402 		cryp_flush_inoutfifo(device_data);
403 		cryp_restore_device_context(device_data, &ctx->dev_ctx);
404 
405 		add_session_id(ctx);
406 		control_register = ctx->dev_ctx.cr;
407 	} else
408 		control_register = ctx->dev_ctx.cr;
409 
410 	writel(control_register |
411 	       (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS),
412 	       &device_data->base->cr);
413 
414 	return 0;
415 }
416 
417 static int cryp_get_device_data(struct cryp_ctx *ctx,
418 				struct cryp_device_data **device_data)
419 {
420 	int ret;
421 	struct klist_iter device_iterator;
422 	struct klist_node *device_node;
423 	struct cryp_device_data *local_device_data = NULL;
424 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
425 
426 	/* Wait until a device is available */
427 	ret = down_interruptible(&driver_data.device_allocation);
428 	if (ret)
429 		return ret;  /* Interrupted */
430 
431 	/* Select a device */
432 	klist_iter_init(&driver_data.device_list, &device_iterator);
433 
434 	device_node = klist_next(&device_iterator);
435 	while (device_node) {
436 		local_device_data = container_of(device_node,
437 					   struct cryp_device_data, list_node);
438 		spin_lock(&local_device_data->ctx_lock);
439 		/* current_ctx allocates a device, NULL = unallocated */
440 		if (local_device_data->current_ctx) {
441 			device_node = klist_next(&device_iterator);
442 		} else {
443 			local_device_data->current_ctx = ctx;
444 			ctx->device = local_device_data;
445 			spin_unlock(&local_device_data->ctx_lock);
446 			break;
447 		}
448 		spin_unlock(&local_device_data->ctx_lock);
449 	}
450 	klist_iter_exit(&device_iterator);
451 
452 	if (!device_node) {
453 		/**
454 		 * No free device found.
455 		 * Since we allocated a device with down_interruptible, this
456 		 * should not be able to happen.
457 		 * Number of available devices, which are contained in
458 		 * device_allocation, is therefore decremented by not doing
459 		 * an up(device_allocation).
460 		 */
461 		return -EBUSY;
462 	}
463 
464 	*device_data = local_device_data;
465 
466 	return 0;
467 }
468 
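/*
 * Request one DMA channel per direction from the STE DMA40 controller and
 * point them at the CRYP FIFOs. The channel configurations (mem_to_engine
 * and engine_to_mem) come from platform data in ux500_cryp_probe().
 */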
469 static void cryp_dma_setup_channel(struct cryp_device_data *device_data,
470 				   struct device *dev)
471 {
472 	struct dma_slave_config mem2cryp = {
473 		.direction = DMA_MEM_TO_DEV,
474 		.dst_addr = device_data->phybase + CRYP_DMA_TX_FIFO,
475 		.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
476 		.dst_maxburst = 4,
477 	};
478 	struct dma_slave_config cryp2mem = {
479 		.direction = DMA_DEV_TO_MEM,
480 		.src_addr = device_data->phybase + CRYP_DMA_RX_FIFO,
481 		.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
482 		.src_maxburst = 4,
483 	};
484 
485 	dma_cap_zero(device_data->dma.mask);
486 	dma_cap_set(DMA_SLAVE, device_data->dma.mask);
487 
488 	device_data->dma.cfg_mem2cryp = mem_to_engine;
489 	device_data->dma.chan_mem2cryp =
490 		dma_request_channel(device_data->dma.mask,
491 				    stedma40_filter,
492 				    device_data->dma.cfg_mem2cryp);
493 
494 	device_data->dma.cfg_cryp2mem = engine_to_mem;
495 	device_data->dma.chan_cryp2mem =
496 		dma_request_channel(device_data->dma.mask,
497 				    stedma40_filter,
498 				    device_data->dma.cfg_cryp2mem);
499 
500 	dmaengine_slave_config(device_data->dma.chan_mem2cryp, &mem2cryp);
501 	dmaengine_slave_config(device_data->dma.chan_cryp2mem, &cryp2mem);
502 
503 	init_completion(&device_data->dma.cryp_dma_complete);
504 }
505 
506 static void cryp_dma_out_callback(void *data)
507 {
508 	struct cryp_ctx *ctx = (struct cryp_ctx *) data;
509 	dev_dbg(ctx->device->dev, "[%s]: ", __func__);
510 
511 	complete(&ctx->device->dma.cryp_dma_complete);
512 }
513 
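/*
 * Map the scatterlist for the requested direction, prepare a slave-DMA
 * descriptor on the corresponding channel and submit it. Only the
 * device-to-memory descriptor gets a completion callback; ablk_dma_crypt()
 * waits on cryp_dma_complete for that callback to fire.
 */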
514 static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
515 				 struct scatterlist *sg,
516 				 int len,
517 				 enum dma_data_direction direction)
518 {
519 	struct dma_async_tx_descriptor *desc;
520 	struct dma_chan *channel = NULL;
521 	dma_cookie_t cookie;
522 
523 	dev_dbg(ctx->device->dev, "[%s]: ", __func__);
524 
525 	if (unlikely(!IS_ALIGNED((unsigned long)sg, 4))) {
526 		dev_err(ctx->device->dev, "[%s]: Data in sg list isn't "
527 			"aligned! Addr: 0x%08lx", __func__, (unsigned long)sg);
528 		return -EFAULT;
529 	}
530 
531 	switch (direction) {
532 	case DMA_TO_DEVICE:
533 		channel = ctx->device->dma.chan_mem2cryp;
534 		ctx->device->dma.sg_src = sg;
535 		ctx->device->dma.sg_src_len = dma_map_sg(channel->device->dev,
536 						 ctx->device->dma.sg_src,
537 						 ctx->device->dma.nents_src,
538 						 direction);
539 
540 		if (!ctx->device->dma.sg_src_len) {
541 			dev_dbg(ctx->device->dev,
542 				"[%s]: Could not map the sg list (TO_DEVICE)",
543 				__func__);
544 			return -EFAULT;
545 		}
546 
547 		dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
548 			"(TO_DEVICE)", __func__);
549 
550 		desc = dmaengine_prep_slave_sg(channel,
551 				ctx->device->dma.sg_src,
552 				ctx->device->dma.sg_src_len,
553 				DMA_MEM_TO_DEV, DMA_CTRL_ACK);
554 		break;
555 
556 	case DMA_FROM_DEVICE:
557 		channel = ctx->device->dma.chan_cryp2mem;
558 		ctx->device->dma.sg_dst = sg;
559 		ctx->device->dma.sg_dst_len = dma_map_sg(channel->device->dev,
560 						 ctx->device->dma.sg_dst,
561 						 ctx->device->dma.nents_dst,
562 						 direction);
563 
564 		if (!ctx->device->dma.sg_dst_len) {
565 			dev_dbg(ctx->device->dev,
566 				"[%s]: Could not map the sg list (FROM_DEVICE)",
567 				__func__);
568 			return -EFAULT;
569 		}
570 
571 		dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
572 			"(FROM_DEVICE)", __func__);
573 
574 		desc = dmaengine_prep_slave_sg(channel,
575 				ctx->device->dma.sg_dst,
576 				ctx->device->dma.sg_dst_len,
577 				DMA_DEV_TO_MEM,
578 				DMA_CTRL_ACK |
579 				DMA_PREP_INTERRUPT);
580 
581 		desc->callback = cryp_dma_out_callback;
582 		desc->callback_param = ctx;
583 		break;
584 
585 	default:
586 		dev_dbg(ctx->device->dev, "[%s]: Invalid DMA direction",
587 			__func__);
588 		return -EFAULT;
589 	}
590 
591 	cookie = dmaengine_submit(desc);
592 	if (dma_submit_error(cookie)) {
593 		dev_dbg(ctx->device->dev, "[%s]: DMA submission failed\n",
594 			__func__);
595 		return cookie;
596 	}
597 
598 	dma_async_issue_pending(channel);
599 
600 	return 0;
601 }
602 
603 static void cryp_dma_done(struct cryp_ctx *ctx)
604 {
605 	struct dma_chan *chan;
606 
607 	dev_dbg(ctx->device->dev, "[%s]: ", __func__);
608 
609 	chan = ctx->device->dma.chan_mem2cryp;
610 	dmaengine_terminate_all(chan);
611 	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
612 		     ctx->device->dma.nents_src, DMA_TO_DEVICE);
613 
614 	chan = ctx->device->dma.chan_cryp2mem;
615 	dmaengine_terminate_all(chan);
616 	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
617 		     ctx->device->dma.nents_dst, DMA_FROM_DEVICE);
618 }
619 
620 static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg,
621 			  int len)
622 {
623 	int error = cryp_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
624 	dev_dbg(ctx->device->dev, "[%s]: ", __func__);
625 
626 	if (error) {
627 		dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
628 			"failed", __func__);
629 		return error;
630 	}
631 
632 	return len;
633 }
634 
635 static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len)
636 {
637 	int error = cryp_set_dma_transfer(ctx, sg, len, DMA_FROM_DEVICE);
638 	if (error) {
639 		dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
640 			"failed", __func__);
641 		return error;
642 	}
643 
644 	return len;
645 }
646 
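/*
 * Polling mode: push one block at a time into the input FIFO, wait for the
 * engine to finish, then read the result back. Note that len counts 32-bit
 * words per block while remaining_length counts bytes.
 */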
647 static void cryp_polling_mode(struct cryp_ctx *ctx,
648 			      struct cryp_device_data *device_data)
649 {
650 	int len = ctx->blocksize / BYTES_PER_WORD;
651 	int remaining_length = ctx->datalen;
652 	u32 *indata = (u32 *)ctx->indata;
653 	u32 *outdata = (u32 *)ctx->outdata;
654 
655 	while (remaining_length > 0) {
656 		writesl(&device_data->base->din, indata, len);
657 		indata += len;
658 		remaining_length -= (len * BYTES_PER_WORD);
659 		cryp_wait_until_done(device_data);
660 
661 		readsl(&device_data->base->dout, outdata, len);
662 		outdata += len;
663 		cryp_wait_until_done(device_data);
664 	}
665 }
666 
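/*
 * Drop the regulator and clock for the CRYP block. If a transfer context is
 * active and save_device_context is set, the hardware state is saved first
 * so that cryp_enable_power() can restore it later.
 */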
667 static int cryp_disable_power(struct device *dev,
668 			      struct cryp_device_data *device_data,
669 			      bool save_device_context)
670 {
671 	int ret = 0;
672 
673 	dev_dbg(dev, "[%s]", __func__);
674 
675 	spin_lock(&device_data->power_state_spinlock);
676 	if (!device_data->power_state)
677 		goto out;
678 
679 	spin_lock(&device_data->ctx_lock);
680 	if (save_device_context && device_data->current_ctx) {
681 		cryp_save_device_context(device_data,
682 				&device_data->current_ctx->dev_ctx,
683 				cryp_mode);
684 		device_data->restore_dev_ctx = true;
685 	}
686 	spin_unlock(&device_data->ctx_lock);
687 
688 	clk_disable(device_data->clk);
689 	ret = regulator_disable(device_data->pwr_regulator);
690 	if (ret)
691 		dev_err(dev, "[%s]: "
692 				"regulator_disable() failed!",
693 				__func__);
694 
695 	device_data->power_state = false;
696 
697 out:
698 	spin_unlock(&device_data->power_state_spinlock);
699 
700 	return ret;
701 }
702 
703 static int cryp_enable_power(
704 		struct device *dev,
705 		struct cryp_device_data *device_data,
706 		bool restore_device_context)
707 {
708 	int ret = 0;
709 
710 	dev_dbg(dev, "[%s]", __func__);
711 
712 	spin_lock(&device_data->power_state_spinlock);
713 	if (!device_data->power_state) {
714 		ret = regulator_enable(device_data->pwr_regulator);
715 		if (ret) {
716 			dev_err(dev, "[%s]: regulator_enable() failed!",
717 					__func__);
718 			goto out;
719 		}
720 
721 		ret = clk_enable(device_data->clk);
722 		if (ret) {
723 			dev_err(dev, "[%s]: clk_enable() failed!",
724 					__func__);
725 			regulator_disable(device_data->pwr_regulator);
726 			goto out;
727 		}
728 		device_data->power_state = true;
729 	}
730 
731 	if (device_data->restore_dev_ctx) {
732 		spin_lock(&device_data->ctx_lock);
733 		if (restore_device_context && device_data->current_ctx) {
734 			device_data->restore_dev_ctx = false;
735 			cryp_restore_device_context(device_data,
736 					&device_data->current_ctx->dev_ctx);
737 		}
738 		spin_unlock(&device_data->ctx_lock);
739 	}
740 out:
741 	spin_unlock(&device_data->power_state_spinlock);
742 
743 	return ret;
744 }
745 
746 static int hw_crypt_noxts(struct cryp_ctx *ctx,
747 			  struct cryp_device_data *device_data)
748 {
749 	int ret = 0;
750 
751 	const u8 *indata = ctx->indata;
752 	u8 *outdata = ctx->outdata;
753 	u32 datalen = ctx->datalen;
754 	u32 outlen = datalen;
755 
756 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
757 
758 	ctx->outlen = ctx->datalen;
759 
760 	if (unlikely(!IS_ALIGNED((unsigned long)indata, 4))) {
761 		pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: "
762 			 "0x%08lx", __func__, (unsigned long)indata);
763 		return -EINVAL;
764 	}
765 
766 	ret = cryp_setup_context(ctx, device_data);
767 
768 	if (ret)
769 		goto out;
770 
771 	if (cryp_mode == CRYP_MODE_INTERRUPT) {
772 		cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO |
773 				    CRYP_IRQ_SRC_OUTPUT_FIFO);
774 
775 		/*
776 		 * ctx->outlen is decremented in the cryp_interrupt_handler
777 		 * function. We had to add cpu_relax() (barrier) to make sure
778 		 * that gcc didn't optimize away this variable.
779 		 */
780 		while (ctx->outlen > 0)
781 			cpu_relax();
782 	} else if (cryp_mode == CRYP_MODE_POLLING ||
783 		   cryp_mode == CRYP_MODE_DMA) {
784 		/*
785 		 * The reason for handling DMA in this if case is that when we
786 		 * run with cryp_mode = 2, separate DMA routines handle
787 		 * cipher/plaintext larger than the blocksize, but for the
788 		 * normal CRYPTO_ALG_TYPE_CIPHER path we still use polling
789 		 * mode, since the overhead of setting up DMA eats up the
790 		 * benefit of using it.
791 		 */
792 		cryp_polling_mode(ctx, device_data);
793 	} else {
794 		dev_err(ctx->device->dev, "[%s]: Invalid operation mode!",
795 			__func__);
796 		ret = -EPERM;
797 		goto out;
798 	}
799 
800 	cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
801 	ctx->updated = 1;
802 
803 out:
804 	ctx->indata = indata;
805 	ctx->outdata = outdata;
806 	ctx->datalen = datalen;
807 	ctx->outlen = outlen;
808 
809 	return ret;
810 }
811 
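/* Count how many scatterlist entries are needed to cover nbytes of data. */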
812 static int get_nents(struct scatterlist *sg, int nbytes)
813 {
814 	int nents = 0;
815 
816 	while (nbytes > 0) {
817 		nbytes -= sg->length;
818 		sg = sg_next(sg);
819 		nents++;
820 	}
821 
822 	return nents;
823 }
824 
825 static int ablk_dma_crypt(struct skcipher_request *areq)
826 {
827 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
828 	struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
829 	struct cryp_device_data *device_data;
830 
831 	int bytes_written = 0;
832 	int bytes_read = 0;
833 	int ret;
834 
835 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
836 
837 	ctx->datalen = areq->cryptlen;
838 	ctx->outlen = areq->cryptlen;
839 
840 	ret = cryp_get_device_data(ctx, &device_data);
841 	if (ret)
842 		return ret;
843 
844 	ret = cryp_setup_context(ctx, device_data);
845 	if (ret)
846 		goto out;
847 
848 	/* We have the device now, so store the nents in the dma struct. */
849 	ctx->device->dma.nents_src = get_nents(areq->src, ctx->datalen);
850 	ctx->device->dma.nents_dst = get_nents(areq->dst, ctx->outlen);
851 
852 	/* Enable DMA in- and output. */
853 	cryp_configure_for_dma(device_data, CRYP_DMA_ENABLE_BOTH_DIRECTIONS);
854 
855 	bytes_written = cryp_dma_write(ctx, areq->src, ctx->datalen);
856 	bytes_read = cryp_dma_read(ctx, areq->dst, bytes_written);
857 
858 	wait_for_completion(&ctx->device->dma.cryp_dma_complete);
859 	cryp_dma_done(ctx);
860 
861 	cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
862 	ctx->updated = 1;
863 
864 out:
865 	spin_lock(&device_data->ctx_lock);
866 	device_data->current_ctx = NULL;
867 	ctx->device = NULL;
868 	spin_unlock(&device_data->ctx_lock);
869 
870 	/*
871 	 * The down_interruptible part for this semaphore is called in
872 	 * cryp_get_device_data.
873 	 */
874 	up(&driver_data.device_allocation);
875 
876 	if (unlikely(bytes_written != bytes_read))
877 		return -EPERM;
878 
879 	return 0;
880 }
881 
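/*
 * Non-DMA path: walk the request with skcipher_walk_async() and hand each
 * chunk, rounded down to a whole number of blocks, to hw_crypt_noxts(),
 * which moves the data in interrupt or polling mode depending on cryp_mode.
 */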
882 static int ablk_crypt(struct skcipher_request *areq)
883 {
884 	struct skcipher_walk walk;
885 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
886 	struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
887 	struct cryp_device_data *device_data;
888 	unsigned long src_paddr;
889 	unsigned long dst_paddr;
890 	int ret;
891 	int nbytes;
892 
893 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
894 
895 	ret = cryp_get_device_data(ctx, &device_data);
896 	if (ret)
897 		goto out;
898 
899 	ret = skcipher_walk_async(&walk, areq);
900 
901 	if (ret) {
902 		pr_err(DEV_DBG_NAME "[%s]: skcipher_walk_async() failed!",
903 			__func__);
904 		goto out;
905 	}
906 
907 	while ((nbytes = walk.nbytes) > 0) {
908 		ctx->iv = walk.iv;
909 		src_paddr = (page_to_phys(walk.src.phys.page) + walk.src.phys.offset);
910 		ctx->indata = phys_to_virt(src_paddr);
911 
912 		dst_paddr = (page_to_phys(walk.dst.phys.page) + walk.dst.phys.offset);
913 		ctx->outdata = phys_to_virt(dst_paddr);
914 
915 		ctx->datalen = nbytes - (nbytes % ctx->blocksize);
916 
917 		ret = hw_crypt_noxts(ctx, device_data);
918 		if (ret)
919 			goto out;
920 
921 		nbytes -= ctx->datalen;
922 		ret = skcipher_walk_done(&walk, nbytes);
923 		if (ret)
924 			goto out;
925 	}
926 
927 out:
928 	/* Release the device */
929 	spin_lock(&device_data->ctx_lock);
930 	device_data->current_ctx = NULL;
931 	ctx->device = NULL;
932 	spin_unlock(&device_data->ctx_lock);
933 
934 	/*
935 	 * The down_interruptible part for this semaphore is called in
936 	 * cryp_get_device_data.
937 	 */
938 	up(&driver_data.device_allocation);
939 
940 	return ret;
941 }
942 
943 static int aes_skcipher_setkey(struct crypto_skcipher *cipher,
944 				 const u8 *key, unsigned int keylen)
945 {
946 	struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
947 
948 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
949 
950 	switch (keylen) {
951 	case AES_KEYSIZE_128:
952 		ctx->config.keysize = CRYP_KEY_SIZE_128;
953 		break;
954 
955 	case AES_KEYSIZE_192:
956 		ctx->config.keysize = CRYP_KEY_SIZE_192;
957 		break;
958 
959 	case AES_KEYSIZE_256:
960 		ctx->config.keysize = CRYP_KEY_SIZE_256;
961 		break;
962 
963 	default:
964 		pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__);
965 		return -EINVAL;
966 	}
967 
968 	memcpy(ctx->key, key, keylen);
969 	ctx->keylen = keylen;
970 
971 	ctx->updated = 0;
972 
973 	return 0;
974 }
975 
976 static int des_skcipher_setkey(struct crypto_skcipher *cipher,
977 				 const u8 *key, unsigned int keylen)
978 {
979 	struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
980 	int err;
981 
982 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
983 
984 	err = verify_skcipher_des_key(cipher, key);
985 	if (err)
986 		return err;
987 
988 	memcpy(ctx->key, key, keylen);
989 	ctx->keylen = keylen;
990 
991 	ctx->updated = 0;
992 	return 0;
993 }
994 
995 static int des3_skcipher_setkey(struct crypto_skcipher *cipher,
996 				  const u8 *key, unsigned int keylen)
997 {
998 	struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
999 	int err;
1000 
1001 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
1002 
1003 	err = verify_skcipher_des3_key(cipher, key);
1004 	if (err)
1005 		return err;
1006 
1007 	memcpy(ctx->key, key, keylen);
1008 	ctx->keylen = keylen;
1009 
1010 	ctx->updated = 0;
1011 	return 0;
1012 }
1013 
1014 static int cryp_blk_encrypt(struct skcipher_request *areq)
1015 {
1016 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1017 	struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
1018 
1019 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
1020 
1021 	ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
1022 
1023 	/* DMA does not work for DES due to a hw bug */
1025 	if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode))
1026 		return ablk_dma_crypt(areq);
1027 
1028 	/* For everything except DMA, we run the non DMA version. */
1029 	return ablk_crypt(areq);
1030 }
1031 
1032 static int cryp_blk_decrypt(struct skcipher_request *areq)
1033 {
1034 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1035 	struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
1036 
1037 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
1038 
1039 	ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
1040 
1041 	/* DMA does not work for DES due to a hw bug */
1042 	if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode))
1043 		return ablk_dma_crypt(areq);
1044 
1045 	/* For everything except DMA, we run the non DMA version. */
1046 	return ablk_crypt(areq);
1047 }
1048 
1049 struct cryp_algo_template {
1050 	enum cryp_algo_mode algomode;
1051 	struct skcipher_alg skcipher;
1052 };
1053 
1054 static int cryp_init_tfm(struct crypto_skcipher *tfm)
1055 {
1056 	struct cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
1057 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1058 	struct cryp_algo_template *cryp_alg = container_of(alg,
1059 			struct cryp_algo_template,
1060 			skcipher);
1061 
1062 	ctx->config.algomode = cryp_alg->algomode;
1063 	ctx->blocksize = crypto_skcipher_blocksize(tfm);
1064 
1065 	return 0;
1066 }
1067 
1068 static struct cryp_algo_template cryp_algs[] = {
1069 	{
1070 		.algomode = CRYP_ALGO_AES_ECB,
1071 		.skcipher = {
1072 			.base.cra_name		= "ecb(aes)",
1073 			.base.cra_driver_name	= "ecb-aes-ux500",
1074 			.base.cra_priority	= 300,
1075 			.base.cra_flags		= CRYPTO_ALG_ASYNC,
1076 			.base.cra_blocksize	= AES_BLOCK_SIZE,
1077 			.base.cra_ctxsize	= sizeof(struct cryp_ctx),
1078 			.base.cra_alignmask	= 3,
1079 			.base.cra_module	= THIS_MODULE,
1080 
1081 			.min_keysize		= AES_MIN_KEY_SIZE,
1082 			.max_keysize		= AES_MAX_KEY_SIZE,
1083 			.setkey			= aes_skcipher_setkey,
1084 			.encrypt		= cryp_blk_encrypt,
1085 			.decrypt		= cryp_blk_decrypt,
1086 			.init			= cryp_init_tfm,
1087 		}
1088 	},
1089 	{
1090 		.algomode = CRYP_ALGO_AES_CBC,
1091 		.skcipher = {
1092 			.base.cra_name		= "cbc(aes)",
1093 			.base.cra_driver_name	= "cbc-aes-ux500",
1094 			.base.cra_priority	= 300,
1095 			.base.cra_flags		= CRYPTO_ALG_ASYNC,
1096 			.base.cra_blocksize	= AES_BLOCK_SIZE,
1097 			.base.cra_ctxsize	= sizeof(struct cryp_ctx),
1098 			.base.cra_alignmask	= 3,
1099 			.base.cra_module	= THIS_MODULE,
1100 
1101 			.min_keysize		= AES_MIN_KEY_SIZE,
1102 			.max_keysize		= AES_MAX_KEY_SIZE,
1103 			.setkey			= aes_skcipher_setkey,
1104 			.encrypt		= cryp_blk_encrypt,
1105 			.decrypt		= cryp_blk_decrypt,
1106 			.init			= cryp_init_tfm,
1107 			.ivsize			= AES_BLOCK_SIZE,
1108 		}
1109 	},
1110 	{
1111 		.algomode = CRYP_ALGO_AES_CTR,
1112 		.skcipher = {
1113 			.base.cra_name		= "ctr(aes)",
1114 			.base.cra_driver_name	= "ctr-aes-ux500",
1115 			.base.cra_priority	= 300,
1116 			.base.cra_flags		= CRYPTO_ALG_ASYNC,
1117 			.base.cra_blocksize	= 1,
1118 			.base.cra_ctxsize	= sizeof(struct cryp_ctx),
1119 			.base.cra_alignmask	= 3,
1120 			.base.cra_module	= THIS_MODULE,
1121 
1122 			.min_keysize		= AES_MIN_KEY_SIZE,
1123 			.max_keysize		= AES_MAX_KEY_SIZE,
1124 			.setkey			= aes_skcipher_setkey,
1125 			.encrypt		= cryp_blk_encrypt,
1126 			.decrypt		= cryp_blk_decrypt,
1127 			.init			= cryp_init_tfm,
1128 			.ivsize			= AES_BLOCK_SIZE,
1129 			.chunksize		= AES_BLOCK_SIZE,
1130 		}
1131 	},
1132 	{
1133 		.algomode = CRYP_ALGO_DES_ECB,
1134 		.skcipher = {
1135 			.base.cra_name		= "ecb(des)",
1136 			.base.cra_driver_name	= "ecb-des-ux500",
1137 			.base.cra_priority	= 300,
1138 			.base.cra_flags		= CRYPTO_ALG_ASYNC,
1139 			.base.cra_blocksize	= DES_BLOCK_SIZE,
1140 			.base.cra_ctxsize	= sizeof(struct cryp_ctx),
1141 			.base.cra_alignmask	= 3,
1142 			.base.cra_module	= THIS_MODULE,
1143 
1144 			.min_keysize		= DES_KEY_SIZE,
1145 			.max_keysize		= DES_KEY_SIZE,
1146 			.setkey			= des_skcipher_setkey,
1147 			.encrypt		= cryp_blk_encrypt,
1148 			.decrypt		= cryp_blk_decrypt,
1149 			.init			= cryp_init_tfm,
1150 		}
1151 	},
1152 	{
1153 		.algomode = CRYP_ALGO_TDES_ECB,
1154 		.skcipher = {
1155 			.base.cra_name		= "ecb(des3_ede)",
1156 			.base.cra_driver_name	= "ecb-des3_ede-ux500",
1157 			.base.cra_priority	= 300,
1158 			.base.cra_flags		= CRYPTO_ALG_ASYNC,
1159 			.base.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1160 			.base.cra_ctxsize	= sizeof(struct cryp_ctx),
1161 			.base.cra_alignmask	= 3,
1162 			.base.cra_module	= THIS_MODULE,
1163 
1164 			.min_keysize		= DES3_EDE_KEY_SIZE,
1165 			.max_keysize		= DES3_EDE_KEY_SIZE,
1166 			.setkey			= des3_skcipher_setkey,
1167 			.encrypt		= cryp_blk_encrypt,
1168 			.decrypt		= cryp_blk_decrypt,
1169 			.init			= cryp_init_tfm,
1170 		}
1171 	},
1172 	{
1173 		.algomode = CRYP_ALGO_DES_CBC,
1174 		.skcipher = {
1175 			.base.cra_name		= "cbc(des)",
1176 			.base.cra_driver_name	= "cbc-des-ux500",
1177 			.base.cra_priority	= 300,
1178 			.base.cra_flags		= CRYPTO_ALG_ASYNC,
1179 			.base.cra_blocksize	= DES_BLOCK_SIZE,
1180 			.base.cra_ctxsize	= sizeof(struct cryp_ctx),
1181 			.base.cra_alignmask	= 3,
1182 			.base.cra_module	= THIS_MODULE,
1183 
1184 			.min_keysize		= DES_KEY_SIZE,
1185 			.max_keysize		= DES_KEY_SIZE,
1186 			.setkey			= des_skcipher_setkey,
1187 			.encrypt		= cryp_blk_encrypt,
1188 			.decrypt		= cryp_blk_decrypt,
1189 			.ivsize			= DES_BLOCK_SIZE,
1190 			.init			= cryp_init_tfm,
1191 		}
1192 	},
1193 	{
1194 		.algomode = CRYP_ALGO_TDES_CBC,
1195 		.skcipher = {
1196 			.base.cra_name		= "cbc(des3_ede)",
1197 			.base.cra_driver_name	= "cbc-des3_ede-ux500",
1198 			.base.cra_priority	= 300,
1199 			.base.cra_flags		= CRYPTO_ALG_ASYNC,
1200 			.base.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1201 			.base.cra_ctxsize	= sizeof(struct cryp_ctx),
1202 			.base.cra_alignmask	= 3,
1203 			.base.cra_module	= THIS_MODULE,
1204 
1205 			.min_keysize		= DES3_EDE_KEY_SIZE,
1206 			.max_keysize		= DES3_EDE_KEY_SIZE,
1207 			.setkey			= des3_skcipher_setkey,
1208 			.encrypt		= cryp_blk_encrypt,
1209 			.decrypt		= cryp_blk_decrypt,
1210 			.ivsize			= DES3_EDE_BLOCK_SIZE,
1211 			.init			= cryp_init_tfm,
1212 		}
1213 	}
1214 };
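/*
 * Once registered, the algorithms above are reached through the normal
 * crypto API. A minimal, purely illustrative consumer could do:
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 * and, given the priority of 300, would normally be handed the
 * "cbc-aes-ux500" implementation on ux500 hardware unless a higher-priority
 * driver is present.
 */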
1215 
1216 /**
1217  * cryp_algs_register_all - register the skcipher algorithms in cryp_algs[]
1218  */
1219 static int cryp_algs_register_all(void)
1220 {
1221 	int ret;
1222 	int i;
1223 	int count;
1224 
1225 	pr_debug("[%s]", __func__);
1226 
1227 	for (i = 0; i < ARRAY_SIZE(cryp_algs); i++) {
1228 		ret = crypto_register_skcipher(&cryp_algs[i].skcipher);
1229 		if (ret) {
1230 			count = i;
1231 			pr_err("[%s] alg registration failed",
1232 					cryp_algs[i].skcipher.base.cra_driver_name);
1233 			goto unreg;
1234 		}
1235 	}
1236 	return 0;
1237 unreg:
1238 	for (i = 0; i < count; i++)
1239 		crypto_unregister_skcipher(&cryp_algs[i].skcipher);
1240 	return ret;
1241 }
1242 
1243 /**
1244  * cryp_algs_unregister_all - unregister the skcipher algorithms in cryp_algs[]
1245  */
1246 static void cryp_algs_unregister_all(void)
1247 {
1248 	int i;
1249 
1250 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
1251 
1252 	for (i = 0; i < ARRAY_SIZE(cryp_algs); i++)
1253 		crypto_unregister_skcipher(&cryp_algs[i].skcipher);
1254 }
1255 
1256 static int ux500_cryp_probe(struct platform_device *pdev)
1257 {
1258 	int ret;
1259 	struct resource *res;
1260 	struct cryp_device_data *device_data;
1261 	struct cryp_protection_config prot = {
1262 		.privilege_access = CRYP_STATE_ENABLE
1263 	};
1264 	struct device *dev = &pdev->dev;
1265 
1266 	dev_dbg(dev, "[%s]", __func__);
1267 	device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_KERNEL);
1268 	if (!device_data) {
1269 		ret = -ENOMEM;
1270 		goto out;
1271 	}
1272 
1273 	device_data->dev = dev;
1274 	device_data->current_ctx = NULL;
1275 
1276 	/* Grab the DMA configuration from platform data. */
1277 	mem_to_engine = &((struct cryp_platform_data *)
1278 			 dev->platform_data)->mem_to_engine;
1279 	engine_to_mem = &((struct cryp_platform_data *)
1280 			 dev->platform_data)->engine_to_mem;
1281 
1282 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1283 	if (!res) {
1284 		dev_err(dev, "[%s]: platform_get_resource() failed",
1285 				__func__);
1286 		ret = -ENODEV;
1287 		goto out;
1288 	}
1289 
1290 	device_data->phybase = res->start;
1291 	device_data->base = devm_ioremap_resource(dev, res);
1292 	if (IS_ERR(device_data->base)) {
1293 		ret = PTR_ERR(device_data->base);
1294 		goto out;
1295 	}
1296 
1297 	spin_lock_init(&device_data->ctx_lock);
1298 	spin_lock_init(&device_data->power_state_spinlock);
1299 
1300 	/* Enable power for CRYP hardware block */
1301 	device_data->pwr_regulator = regulator_get(&pdev->dev, "v-ape");
1302 	if (IS_ERR(device_data->pwr_regulator)) {
1303 		dev_err(dev, "[%s]: could not get cryp regulator", __func__);
1304 		ret = PTR_ERR(device_data->pwr_regulator);
1305 		device_data->pwr_regulator = NULL;
1306 		goto out;
1307 	}
1308 
1309 	/* Enable the clk for CRYP hardware block */
1310 	device_data->clk = devm_clk_get(&pdev->dev, NULL);
1311 	if (IS_ERR(device_data->clk)) {
1312 		dev_err(dev, "[%s]: clk_get() failed!", __func__);
1313 		ret = PTR_ERR(device_data->clk);
1314 		goto out_regulator;
1315 	}
1316 
1317 	ret = clk_prepare(device_data->clk);
1318 	if (ret) {
1319 		dev_err(dev, "[%s]: clk_prepare() failed!", __func__);
1320 		goto out_regulator;
1321 	}
1322 
1323 	/* Enable device power (and clock) */
1324 	ret = cryp_enable_power(device_data->dev, device_data, false);
1325 	if (ret) {
1326 		dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
1327 		goto out_clk_unprepare;
1328 	}
1329 
1330 	if (cryp_check(device_data)) {
1331 		dev_err(dev, "[%s]: cryp_check() failed!", __func__);
1332 		ret = -EINVAL;
1333 		goto out_power;
1334 	}
1335 
1336 	if (cryp_configure_protection(device_data, &prot)) {
1337 		dev_err(dev, "[%s]: cryp_configure_protection() failed!",
1338 			__func__);
1339 		ret = -EINVAL;
1340 		goto out_power;
1341 	}
1342 
1343 	device_data->irq = platform_get_irq(pdev, 0);
1344 	if (device_data->irq <= 0) {
1345 		ret = device_data->irq ? device_data->irq : -ENXIO;
1346 		goto out_power;
1347 	}
1348 
1349 	ret = devm_request_irq(&pdev->dev, device_data->irq,
1350 			       cryp_interrupt_handler, 0, "cryp1", device_data);
1351 	if (ret) {
1352 		dev_err(dev, "[%s]: Unable to request IRQ", __func__);
1353 		goto out_power;
1354 	}
1355 
1356 	if (cryp_mode == CRYP_MODE_DMA)
1357 		cryp_dma_setup_channel(device_data, dev);
1358 
1359 	platform_set_drvdata(pdev, device_data);
1360 
1361 	/* Put the new device into the device list... */
1362 	klist_add_tail(&device_data->list_node, &driver_data.device_list);
1363 
1364 	/* ... and signal that a new device is available. */
1365 	up(&driver_data.device_allocation);
1366 
1367 	atomic_set(&session_id, 1);
1368 
1369 	ret = cryp_algs_register_all();
1370 	if (ret) {
1371 		dev_err(dev, "[%s]: cryp_algs_register_all() failed!",
1372 			__func__);
1373 		goto out_power;
1374 	}
1375 
1376 	dev_info(dev, "successfully registered\n");
1377 
1378 	return 0;
1379 
1380 out_power:
1381 	cryp_disable_power(device_data->dev, device_data, false);
1382 
1383 out_clk_unprepare:
1384 	clk_unprepare(device_data->clk);
1385 
1386 out_regulator:
1387 	regulator_put(device_data->pwr_regulator);
1388 
1389 out:
1390 	return ret;
1391 }
1392 
1393 static int ux500_cryp_remove(struct platform_device *pdev)
1394 {
1395 	struct cryp_device_data *device_data;
1396 
1397 	dev_dbg(&pdev->dev, "[%s]", __func__);
1398 	device_data = platform_get_drvdata(pdev);
1399 	if (!device_data) {
1400 		dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
1401 			__func__);
1402 		return -ENOMEM;
1403 	}
1404 
1405 	/* Try to decrease the number of available devices. */
1406 	if (down_trylock(&driver_data.device_allocation))
1407 		return -EBUSY;
1408 
1409 	/* Check that the device is free */
1410 	spin_lock(&device_data->ctx_lock);
1411 	/* current_ctx allocates a device, NULL = unallocated */
1412 	if (device_data->current_ctx) {
1413 		/* The device is busy */
1414 		spin_unlock(&device_data->ctx_lock);
1415 		/* Return the device to the pool. */
1416 		up(&driver_data.device_allocation);
1417 		return -EBUSY;
1418 	}
1419 
1420 	spin_unlock(&device_data->ctx_lock);
1421 
1422 	/* Remove the device from the list */
1423 	if (klist_node_attached(&device_data->list_node))
1424 		klist_remove(&device_data->list_node);
1425 
1426 	/* If this was the last device, remove the services */
1427 	if (list_empty(&driver_data.device_list.k_list))
1428 		cryp_algs_unregister_all();
1429 
1430 	if (cryp_disable_power(&pdev->dev, device_data, false))
1431 		dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
1432 			__func__);
1433 
1434 	clk_unprepare(device_data->clk);
1435 	regulator_put(device_data->pwr_regulator);
1436 
1437 	return 0;
1438 }
1439 
1440 static void ux500_cryp_shutdown(struct platform_device *pdev)
1441 {
1442 	struct cryp_device_data *device_data;
1443 
1444 	dev_dbg(&pdev->dev, "[%s]", __func__);
1445 
1446 	device_data = platform_get_drvdata(pdev);
1447 	if (!device_data) {
1448 		dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
1449 			__func__);
1450 		return;
1451 	}
1452 
1453 	/* Check that the device is free */
1454 	spin_lock(&device_data->ctx_lock);
1455 	/* current_ctx allocates a device, NULL = unallocated */
1456 	if (!device_data->current_ctx) {
1457 		if (down_trylock(&driver_data.device_allocation))
1458 			dev_dbg(&pdev->dev, "[%s]: Cryp still in use! "
1459 				"Shutting down anyway...", __func__);
1460 		/**
1461 		 * (Allocate the device)
1462 		 * Need to set this to a non-null (dummy) value
1463 		 * to avoid use during a context switch.
1464 		 */
1465 		device_data->current_ctx++;
1466 	}
1467 	spin_unlock(&device_data->ctx_lock);
1468 
1469 	/* Remove the device from the list */
1470 	if (klist_node_attached(&device_data->list_node))
1471 		klist_remove(&device_data->list_node);
1472 
1473 	/* If this was the last device, remove the services */
1474 	if (list_empty(&driver_data.device_list.k_list))
1475 		cryp_algs_unregister_all();
1476 
1477 	if (cryp_disable_power(&pdev->dev, device_data, false))
1478 		dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
1479 			__func__);
1480 
1481 }
1482 
1483 #ifdef CONFIG_PM_SLEEP
1484 static int ux500_cryp_suspend(struct device *dev)
1485 {
1486 	int ret;
1487 	struct platform_device *pdev = to_platform_device(dev);
1488 	struct cryp_device_data *device_data;
1489 	struct cryp_ctx *temp_ctx = NULL;
1490 
1491 	dev_dbg(dev, "[%s]", __func__);
1492 
1493 	/* Handle state? */
1494 	device_data = platform_get_drvdata(pdev);
1495 	if (!device_data) {
1496 		dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__);
1497 		return -ENOMEM;
1498 	}
1499 
1500 	disable_irq(device_data->irq);
1501 
1502 	spin_lock(&device_data->ctx_lock);
1503 	if (!device_data->current_ctx)
1504 		device_data->current_ctx++;
1505 	spin_unlock(&device_data->ctx_lock);
1506 
1507 	if (device_data->current_ctx == ++temp_ctx) {
1508 		if (down_interruptible(&driver_data.device_allocation))
1509 			dev_dbg(dev, "[%s]: down_interruptible() failed",
1510 				__func__);
1511 		ret = cryp_disable_power(dev, device_data, false);
1512 
1513 	} else
1514 		ret = cryp_disable_power(dev, device_data, true);
1515 
1516 	if (ret)
1517 		dev_err(dev, "[%s]: cryp_disable_power()", __func__);
1518 
1519 	return ret;
1520 }
1521 
1522 static int ux500_cryp_resume(struct device *dev)
1523 {
1524 	int ret = 0;
1525 	struct platform_device *pdev = to_platform_device(dev);
1526 	struct cryp_device_data *device_data;
1527 	struct cryp_ctx *temp_ctx = NULL;
1528 
1529 	dev_dbg(dev, "[%s]", __func__);
1530 
1531 	device_data = platform_get_drvdata(pdev);
1532 	if (!device_data) {
1533 		dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__);
1534 		return -ENOMEM;
1535 	}
1536 
1537 	spin_lock(&device_data->ctx_lock);
1538 	if (device_data->current_ctx == ++temp_ctx)
1539 		device_data->current_ctx = NULL;
1540 	spin_unlock(&device_data->ctx_lock);
1541 
1542 
1543 	if (!device_data->current_ctx)
1544 		up(&driver_data.device_allocation);
1545 	else
1546 		ret = cryp_enable_power(dev, device_data, true);
1547 
1548 	if (ret)
1549 		dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
1550 	else
1551 		enable_irq(device_data->irq);
1552 
1553 	return ret;
1554 }
1555 #endif
1556 
1557 static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume);
1558 
1559 static const struct of_device_id ux500_cryp_match[] = {
1560 	{ .compatible = "stericsson,ux500-cryp" },
1561 	{ },
1562 };
1563 MODULE_DEVICE_TABLE(of, ux500_cryp_match);
1564 
1565 static struct platform_driver cryp_driver = {
1566 	.probe  = ux500_cryp_probe,
1567 	.remove = ux500_cryp_remove,
1568 	.shutdown = ux500_cryp_shutdown,
1569 	.driver = {
1570 		.name  = "cryp1",
1571 		.of_match_table = ux500_cryp_match,
1572 		.pm    = &ux500_cryp_pm,
1573 	}
1574 };
1575 
1576 static int __init ux500_cryp_mod_init(void)
1577 {
1578 	pr_debug("[%s] is called!", __func__);
1579 	klist_init(&driver_data.device_list, NULL, NULL);
1580 	/* Initialize the semaphore to 0 devices (locked state) */
1581 	sema_init(&driver_data.device_allocation, 0);
1582 	return platform_driver_register(&cryp_driver);
1583 }
1584 
1585 static void __exit ux500_cryp_mod_fini(void)
1586 {
1587 	pr_debug("[%s] is called!", __func__);
1588 	platform_driver_unregister(&cryp_driver);
1589 }
1590 
1591 module_init(ux500_cryp_mod_init);
1592 module_exit(ux500_cryp_mod_fini);
1593 
1594 module_param(cryp_mode, int, 0);
1595 
1596 MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
1597 MODULE_ALIAS_CRYPTO("aes-all");
1598 MODULE_ALIAS_CRYPTO("des-all");
1599 
1600 MODULE_LICENSE("GPL");
1601