/*
 * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
 * Copyright(c) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>

/**
 * pq_scribble_page - space to hold throwaway P or Q buffer for
 * synchronous gen_syndrome
 */
static struct page *pq_scribble_page;

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros so they can be used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])

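/* For example, with disks == 6 the layout of blocks[] is:
 *
 *	blocks[0..3]: data source pages
 *	blocks[4]:    'P' destination, i.e. P(blocks, 6)
 *	blocks[5]:    'Q' destination, i.e. Q(blocks, 6)
 */
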
/**
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
		      const unsigned char *scfs, unsigned int offset, int disks,
		      size_t len, dma_addr_t *dma_src,
		      struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_device *dma = chan->device;
	enum dma_ctrl_flags dma_flags = 0;
	enum async_tx_flags flags_orig = submit->flags;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	void *cb_param_orig = submit->cb_param;
	int src_cnt = disks - 2;
	unsigned char coefs[src_cnt];
	unsigned short pq_src_cnt;
	dma_addr_t dma_dest[2];
	int src_off = 0;
	int idx;
	int i;

	/* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */
	if (P(blocks, disks))
		dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset,
					   len, DMA_BIDIRECTIONAL);
	else
		dma_flags |= DMA_PREP_PQ_DISABLE_P;
	if (Q(blocks, disks))
		dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset,
					   len, DMA_BIDIRECTIONAL);
	else
		dma_flags |= DMA_PREP_PQ_DISABLE_Q;

	/* convert source addresses, being careful to collapse 'empty'
	 * sources and update the coefficients accordingly
	 */
	for (i = 0, idx = 0; i < src_cnt; i++) {
		if (blocks[i] == NULL)
			continue;
		dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
					    DMA_TO_DEVICE);
		coefs[idx] = scfs[i];
		idx++;
	}
	src_cnt = idx;

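	/* issue the operation in chunks of at most dma_maxpq() sources,
	 * continuing the P/Q computation across chunk boundaries
	 */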
	while (src_cnt > 0) {
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
		/* if we are submitting additional pqs, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffers mapped
		 */
		if (src_cnt > pq_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			submit->flags |= ASYNC_TX_FENCE;
			dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP;
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
			if (cb_fn_orig)
				dma_flags |= DMA_PREP_INTERRUPT;
		}
		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;

		/* Since we have clobbered the src_list we are committed
		 * to doing this asynchronously.  Drivers force forward
		 * progress when they cannot provide a descriptor
		 */
		for (;;) {
			tx = dma->device_prep_dma_pq(chan, dma_dest,
						     &dma_src[src_off],
						     pq_src_cnt,
						     &coefs[src_off], len,
						     dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

		/* drop completed sources */
		src_cnt -= pq_src_cnt;
		src_off += pq_src_cnt;

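		/* any additional chunks must continue from the partial
		 * P/Q results computed so far
		 */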
		dma_flags |= DMA_PREP_CONTINUE;
	}

	return tx;
}

/**
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		     size_t len, struct async_submit_ctl *submit)
{
	void **srcs;
	int i;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) blocks;

	for (i = 0; i < disks; i++) {
		if (blocks[i] == NULL) {
			BUG_ON(i > disks - 3); /* P or Q can't be NULL here */
			srcs[i] = (void *) raid6_empty_zero_page;
		} else
			srcs[i] = page_address(blocks[i]) + offset;
	}
	raid6_call.gen_syndrome(disks, len, srcs);
	async_tx_sync_epilog(submit);
}

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL those buffers will be replaced with the
 * raid6_empty_zero_page in the synchronous path and omitted in the
 * hardware-asynchronous path.
 *
 * 'blocks' note: if submit->scribble is NULL then the contents of
 * 'blocks' may be overwritten to perform address conversions
 * (dma_map_page() or page_address()).
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		   size_t len, struct async_submit_ctl *submit)
{
	int src_cnt = disks - 2;
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &P(blocks, disks), 2,
						      blocks, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	dma_addr_t *dma_src = NULL;

	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));

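	/* use the caller-provided scribble region for dma addresses, or
	 * reuse blocks[] in place when a dma_addr_t fits in a struct
	 * page pointer
	 */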
	if (submit->scribble)
		dma_src = submit->scribble;
	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
		dma_src = (dma_addr_t *) blocks;

	if (dma_src && device &&
	    (src_cnt <= dma_maxpq(device, 0) ||
	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		/* run the p+q asynchronously */
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);
		return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset,
					     disks, len, dma_src, submit);
	}

	/* run the pq synchronously */
	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

	/* wait for any prerequisite operations */
	async_tx_quiesce(&submit->depend_tx);

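	/* a missing P or Q destination is pointed at the preallocated
	 * scribble page so its result can be computed and discarded
	 */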
	if (!P(blocks, disks)) {
		P(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	if (!Q(blocks, disks)) {
		Q(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	do_sync_gen_syndrome(blocks, offset, disks, len, submit);

	return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);

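/*
 * Usage sketch (illustrative only, not built as part of this file):
 * generate P and Q for a 6-disk stripe.  The caller is assumed to have
 * filled blocks[] with four data pages followed by the P and Q
 * destination pages:
 *
 *	struct page *blocks[6];
 *	addr_conv_t scribble[6];
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
 *	tx = async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, &submit);
 *	async_tx_quiesce(&tx);
 */
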
static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	return NULL;
#endif
	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
				     disks, len);
}

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @submit: submission/completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks'
 * and 'disks' parameters of this routine.  The synchronous path
 * requires a temporary result buffer and submit->scribble to be
 * specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   struct async_submit_ctl *submit)
{
	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx;
	unsigned char coefs[disks-2];
	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
	dma_addr_t *dma_src = NULL;
	int src_cnt = 0;

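	/* validating a P+Q syndrome implies at least two data disks
	 * plus the P and Q destinations
	 */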
	BUG_ON(disks < 4);

	if (submit->scribble)
		dma_src = submit->scribble;
	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
		dma_src = (dma_addr_t *) blocks;

	if (dma_src && device && disks <= dma_maxpq(device, 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		struct device *dev = device->dev;
		dma_addr_t *pq = &dma_src[disks-2];
		int i;

		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);
		if (!P(blocks, disks))
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		else
			pq[0] = dma_map_page(dev, P(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);
		if (!Q(blocks, disks))
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		else
			pq[1] = dma_map_page(dev, Q(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		for (i = 0; i < disks-2; i++)
			if (likely(blocks[i])) {
				dma_src[src_cnt] = dma_map_page(dev, blocks[i],
								offset, len,
								DMA_TO_DEVICE);
				coefs[src_cnt] = raid6_gfexp[i];
				src_cnt++;
			}


		for (;;) {
			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
							    src_cnt,
							    coefs,
							    len, pqres,
							    dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}
		async_tx_submit(chan, tx, submit);

		return tx;
	} else {
		struct page *p_src = P(blocks, disks);
		struct page *q_src = Q(blocks, disks);
		enum async_tx_flags flags_orig = submit->flags;
		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
		void *scribble = submit->scribble;
		void *cb_param_orig = submit->cb_param;
		void *p, *q, *s;

		pr_debug("%s: (sync) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* caller must provide a temporary result buffer and
		 * allow the input parameters to be preserved
		 */
		BUG_ON(!spare || !scribble);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		/* recompute p and/or q into the temporary buffer and then
		 * check to see if the result matches the current value
		 */
		tx = NULL;
		*pqres = 0;
		if (p_src) {
			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, scribble);
			tx = async_xor(spare, blocks, offset, disks-2, len, submit);
			async_tx_quiesce(&tx);
			p = page_address(p_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
		}

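		/* regenerate q into the spare page with the P destination
		 * disabled, then compare against the original q
		 */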
		if (q_src) {
			P(blocks, disks) = NULL;
			Q(blocks, disks) = spare;
			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
			tx = async_gen_syndrome(blocks, offset, disks, len, submit);
			async_tx_quiesce(&tx);
			q = page_address(q_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
		}

		/* restore P, Q and submit */
		P(blocks, disks) = p_src;
		Q(blocks, disks) = q_src;

		submit->cb_fn = cb_fn_orig;
		submit->cb_param = cb_param_orig;
		submit->flags = flags_orig;
		async_tx_sync_epilog(submit);

		return NULL;
	}
}
EXPORT_SYMBOL_GPL(async_syndrome_val);

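/*
 * Usage sketch (illustrative only, not built as part of this file):
 * validate the P/Q of a 6-disk stripe.  'spare' is a throwaway page for
 * the synchronous fallback; 'scribble' must be provided:
 *
 *	enum sum_check_flags pqres = 0;
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
 *	tx = async_syndrome_val(blocks, 0, 6, PAGE_SIZE, &pqres, spare,
 *				&submit);
 *	async_tx_quiesce(&tx);
 *	if (pqres & (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT))
 *		pr_debug("syndrome mismatch\n");
 */
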
static int __init async_pq_init(void)
{
	pq_scribble_page = alloc_page(GFP_KERNEL);

	if (pq_scribble_page)
		return 0;

	pr_err("%s: failed to allocate required spare page\n", __func__);

	return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
	put_page(pq_scribble_page);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");