/*
 * zfcp device driver
 *
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corporation 2002, 2010
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/slab.h>
#include "zfcp_ext.h"
#include "zfcp_qdio.h"

#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))

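/*
 * zfcp_qdio_buffers_enqueue - allocate the SBALs of one queue
 * @sbal: array of QDIO_MAX_BUFFERS_PER_Q buffer pointers to fill
 *
 * Allocates one zeroed page per QBUFF_PER_PAGE buffers and points the
 * remaining array entries into those pages, so all SBALs of a queue are
 * backed by page memory.
 *
 * Returns: 0 on success, -ENOMEM if a page allocation fails.
 */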
static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
{
        int pos;

        for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
                sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
                if (!sbal[pos])
                        return -ENOMEM;
        }
        for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
                if (pos % QBUFF_PER_PAGE)
                        sbal[pos] = sbal[pos - 1] + 1;
        return 0;
}

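/*
 * zfcp_qdio_handler_error - react to an error reported by a QDIO handler
 * @qdio: pointer to struct zfcp_qdio
 * @id: tag for tracing the recovery trigger
 * @qdio_err: error flags as passed by the QDIO layer
 *
 * Requests hardware logging for SLSB-state errors and triggers adapter
 * recovery.
 */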
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
                                    unsigned int qdio_err)
{
        struct zfcp_adapter *adapter = qdio->adapter;

        dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");

        if (qdio_err & QDIO_ERROR_SLSB_STATE)
                zfcp_qdio_siosl(adapter);
        zfcp_erp_adapter_reopen(adapter,
                                ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
                                ZFCP_STATUS_COMMON_ERP_FAILED, id);
}

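/*
 * zfcp_qdio_zero_sbals - reset a range of SBALs to a clean state
 * @sbal: array of SBALs of the queue
 * @first: index of the first SBAL to clear
 * @cnt: number of SBALs to clear, wrapping around the circular queue
 */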
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
{
        int i, sbal_idx;

        for (i = first; i < first + cnt; i++) {
                sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
                memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
        }
}

/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
        unsigned long long now, span;
        int used;

        now = get_clock_monotonic();
        span = (now - qdio->req_q_time) >> 12; /* TOD clock units to microseconds */
        used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
        qdio->req_q_util += used * span;
        qdio->req_q_time = now;
}

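/*
 * zfcp_qdio_int_req - QDIO interrupt handler for the request (output) queue
 * @cdev: ccw device the interrupt occurred on
 * @qdio_err: error flags as passed by the QDIO layer
 * @queue_no: queue number, always 0 since only one output queue is set up
 * @idx: index of the first SBAL returned by the hardware
 * @count: number of SBALs returned
 * @parm: the struct zfcp_qdio, as registered via int_parm
 *
 * Returns transferred SBALs to the free pool and updates the queue
 * utilization statistics.
 */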
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
                              int queue_no, int idx, int count,
                              unsigned long parm)
{
        struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;

        if (unlikely(qdio_err)) {
                zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
                return;
        }

        /* cleanup all SBALs being program-owned now */
        zfcp_qdio_zero_sbals(qdio->req_q, idx, count);

        spin_lock_irq(&qdio->stat_lock);
        zfcp_qdio_account(qdio);
        spin_unlock_irq(&qdio->stat_lock);
        atomic_add(count, &qdio->req_q_free);
        wake_up(&qdio->req_q_wq);
}

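/*
 * zfcp_qdio_int_resp - QDIO interrupt handler for the response (input) queue
 * @cdev: ccw device the interrupt occurred on
 * @qdio_err: error flags as passed by the QDIO layer
 * @queue_no: queue number, always 0 since only one input queue is set up
 * @idx: index of the first SBAL filled by the hardware
 * @count: number of SBALs filled
 * @parm: the struct zfcp_qdio, as registered via int_parm
 */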
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
                               int queue_no, int idx, int count,
                               unsigned long parm)
{
        struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
        int sbal_idx, sbal_no;

        if (unlikely(qdio_err)) {
                zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
                return;
        }

        /*
         * go through all SBALs from input queue currently
         * returned by QDIO layer
         */
        for (sbal_no = 0; sbal_no < count; sbal_no++) {
                sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
                /* go through all SBALEs of SBAL */
                zfcp_fsf_reqid_check(qdio, sbal_idx);
        }

        /*
         * put SBALs back to response queue
         */
        if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
                zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}

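/*
 * zfcp_qdio_sbal_chain - advance a request to the next SBAL of its chain
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 *
 * Closes the current SBAL, marks it as chained and moves the request on
 * to the first SBALE of the following SBAL.
 *
 * Returns: the first SBALE of the new SBAL, or NULL if the request would
 * exceed its last allowed SBAL.
 */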
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
        struct qdio_buffer_element *sbale;

        /* set last entry flag in current SBALE of current SBAL */
        sbale = zfcp_qdio_sbale_curr(qdio, q_req);
        sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

        /* don't exceed last allowed SBAL */
        if (q_req->sbal_last == q_req->sbal_limit)
                return NULL;

        /* set chaining flag in first SBALE of current SBAL */
        sbale = zfcp_qdio_sbale_req(qdio, q_req);
        sbale->flags |= SBAL_FLAGS0_MORE_SBALS;

        /* calculate index of next SBAL */
        q_req->sbal_last++;
        q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

        /* keep this request's number of SBALs up-to-date */
        q_req->sbal_number++;
        BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);

        /* start at first SBALE of new SBAL */
        q_req->sbale_curr = 0;

        /* set storage-block type for new SBAL */
        sbale = zfcp_qdio_sbale_curr(qdio, q_req);
        sbale->flags |= q_req->sbtype;

        return sbale;
}

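/*
 * zfcp_qdio_sbale_next - get the next SBALE of a request
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 *
 * Returns: the next SBALE, chaining into a new SBAL if the current one
 * is full, or NULL if no further SBAL may be used.
 */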
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
        if (q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL)
                return zfcp_qdio_sbal_chain(qdio, q_req);
        q_req->sbale_curr++;
        return zfcp_qdio_sbale_curr(qdio, q_req);
}

/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 * Returns: number of bytes, or error (negative)
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
                            struct scatterlist *sg)
{
        struct qdio_buffer_element *sbale;
        int bytes = 0;

        /* set storage-block type for this request */
        sbale = zfcp_qdio_sbale_req(qdio, q_req);
        sbale->flags |= q_req->sbtype;

        for (; sg; sg = sg_next(sg)) {
                sbale = zfcp_qdio_sbale_next(qdio, q_req);
                if (!sbale) {
                        atomic_inc(&qdio->req_q_full);
                        zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
                                             q_req->sbal_number);
                        return -EINVAL;
                }

                sbale->addr = sg_virt(sg);
                sbale->length = sg->length;

                bytes += sg->length;
        }

        return bytes;
}

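/*
 * zfcp_qdio_sbal_check - check for a free SBAL in the request queue
 * @qdio: pointer to struct zfcp_qdio
 *
 * Returns: 1 if an SBAL is free or the adapter lost its QDIO connection;
 * in that case req_q_lock is left held for the caller. Otherwise returns
 * 0 with req_q_lock released.
 */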
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
        spin_lock_irq(&qdio->req_q_lock);
        if (atomic_read(&qdio->req_q_free) ||
            !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
                return 1;
        spin_unlock_irq(&qdio->req_q_lock);
        return 0;
}

/**
 * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
 * @qdio: pointer to struct zfcp_qdio
 *
 * The req_q_lock must be held by the caller of this function, and
 * this function may only be called from process context; it will
 * sleep when waiting for a free sbal.
 *
 * Returns: 0 on success, -EIO if there is no free sbal after waiting.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
        long ret;

        spin_unlock_irq(&qdio->req_q_lock);
        ret = wait_event_interruptible_timeout(qdio->req_q_wq,
                                zfcp_qdio_sbal_check(qdio), 5 * HZ);

        if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
                return -EIO;

        if (ret > 0)
                return 0;

        if (!ret) {
                atomic_inc(&qdio->req_q_full);
                /* assume hanging outbound queue, try queue recovery */
                zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
        }

        spin_lock_irq(&qdio->req_q_lock);
        return -EIO;
}

/**
 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
        int retval;
        u8 sbal_number = q_req->sbal_number;

        spin_lock(&qdio->stat_lock);
        zfcp_qdio_account(qdio);
        spin_unlock(&qdio->stat_lock);

        retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
                         q_req->sbal_first, sbal_number);

        if (unlikely(retval)) {
                zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
                                     sbal_number);
                return retval;
        }

        /* account for transferred buffers */
        atomic_sub(sbal_number, &qdio->req_q_free);
        qdio->req_q_idx += sbal_number;
        qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;

        return 0;
}

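/*
 * zfcp_qdio_setup_init_data - fill in the qdio_initialize parameters
 * @id: pointer to the qdio_initialize structure to fill
 * @qdio: pointer to struct zfcp_qdio
 *
 * Sets up one input and one output queue, registers the interrupt
 * handlers and the SBAL address arrays, and requests data division
 * support.
 */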
static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
                                      struct zfcp_qdio *qdio)
{
        memset(id, 0, sizeof(*id));
        id->cdev = qdio->adapter->ccw_device;
        id->q_format = QDIO_ZFCP_QFMT;
        memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
        ASCEBC(id->adapter_name, 8);
        id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
        id->no_input_qs = 1;
        id->no_output_qs = 1;
        id->input_handler = zfcp_qdio_int_resp;
        id->output_handler = zfcp_qdio_int_req;
        id->int_parm = (unsigned long) qdio;
        id->input_sbal_addr_array = (void **) (qdio->res_q);
        id->output_sbal_addr_array = (void **) (qdio->req_q);
        id->scan_threshold =
                QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
}

/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @qdio: pointer to struct zfcp_qdio
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
        struct qdio_initialize init_data;

        if (zfcp_qdio_buffers_enqueue(qdio->req_q) ||
            zfcp_qdio_buffers_enqueue(qdio->res_q))
                return -ENOMEM;

        zfcp_qdio_setup_init_data(&init_data, qdio);
        init_waitqueue_head(&qdio->req_q_wq);

        return qdio_allocate(&init_data);
}

/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to struct zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
        struct zfcp_adapter *adapter = qdio->adapter;
        int idx, count;

        if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
                return;

        /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
        spin_lock_irq(&qdio->req_q_lock);
        atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
        spin_unlock_irq(&qdio->req_q_lock);

        wake_up(&qdio->req_q_wq);

        qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

        /* cleanup used outbound sbals */
        count = atomic_read(&qdio->req_q_free);
        if (count < QDIO_MAX_BUFFERS_PER_Q) {
                idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
                count = QDIO_MAX_BUFFERS_PER_Q - count;
                zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
        }
        qdio->req_q_idx = 0;
        atomic_set(&qdio->req_q_free, 0);
}

/**
 * zfcp_qdio_open - prepare and initialize request and response queue
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
        struct qdio_buffer_element *sbale;
        struct qdio_initialize init_data;
        struct zfcp_adapter *adapter = qdio->adapter;
        struct ccw_device *cdev = adapter->ccw_device;
        struct qdio_ssqd_desc ssqd;
        int cc;

        if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
                return -EIO;

        atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
                          &qdio->adapter->status);

        zfcp_qdio_setup_init_data(&init_data, qdio);

        if (qdio_establish(&init_data))
                goto failed_establish;

        if (qdio_get_ssqd_desc(init_data.cdev, &ssqd))
                goto failed_qdio;

        if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
                atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
                                &qdio->adapter->status);

        if (qdio_activate(cdev))
                goto failed_qdio;

        for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
                sbale = &(qdio->res_q[cc]->element[0]);
                sbale->length = 0;
                sbale->flags = SBAL_FLAGS_LAST_ENTRY;
                sbale->addr = NULL;
        }

        if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
                goto failed_qdio;

        /* set index of first available SBAL / number of available SBALs */
        qdio->req_q_idx = 0;
        atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
        atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

        return 0;

failed_qdio:
        qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
        dev_err(&cdev->dev,
                "Setting up the QDIO connection to the FCP adapter failed\n");
        return -EIO;
}

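/*
 * zfcp_qdio_destroy - free the QDIO resources of an adapter
 * @qdio: pointer to struct zfcp_qdio, may be NULL
 *
 * Releases the QDIO data structures of the ccw device and the pages
 * backing the request and response queues, then frees the zfcp_qdio
 * itself.
 */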
void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
        int p;

        if (!qdio)
                return;

        if (qdio->adapter->ccw_device)
                qdio_free(qdio->adapter->ccw_device);

        for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
                free_page((unsigned long) qdio->req_q[p]);
                free_page((unsigned long) qdio->res_q[p]);
        }

        kfree(qdio);
}

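/*
 * zfcp_qdio_setup - allocate and set up the zfcp_qdio for an adapter
 * @adapter: pointer to struct zfcp_adapter
 *
 * Returns: 0 on success, -ENOMEM if allocating the zfcp_qdio or its
 * queue memory fails.
 */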
int zfcp_qdio_setup(struct zfcp_adapter *adapter)
{
        struct zfcp_qdio *qdio;

        qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
        if (!qdio)
                return -ENOMEM;

        qdio->adapter = adapter;

        if (zfcp_qdio_allocate(qdio)) {
                zfcp_qdio_destroy(qdio);
                return -ENOMEM;
        }

        spin_lock_init(&qdio->req_q_lock);
        spin_lock_init(&qdio->stat_lock);

        adapter->qdio = qdio;
        return 0;
}

/**
 * zfcp_qdio_siosl - Trigger logging in FCP channel
 * @adapter: The zfcp_adapter where to trigger logging
 *
 * Call the cio siosl function to trigger hardware logging. This
 * wrapper function sets a flag to ensure hardware logging is only
 * triggered once before going through qdio shutdown.
 *
 * The triggers are always run from qdio tasklet context, so no
 * additional synchronization is necessary.
 */
void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
{
        int rc;

        if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
                return;

        rc = ccw_device_siosl(adapter->ccw_device);
        if (!rc)
                atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
                                &adapter->status);
}