1 /*
2 * zfcp device driver
3 *
4 * Implementation of FSF commands.
5 *
6 * Copyright IBM Corp. 2002, 2013
7 */
8
9 #define KMSG_COMPONENT "zfcp"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11
12 #include <linux/blktrace_api.h>
13 #include <linux/slab.h>
14 #include <scsi/fc/fc_els.h>
15 #include "zfcp_ext.h"
16 #include "zfcp_fc.h"
17 #include "zfcp_dbf.h"
18 #include "zfcp_qdio.h"
19 #include "zfcp_reqlist.h"
20
21 struct kmem_cache *zfcp_fsf_qtcb_cache;
22
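/*
 * An expired FSF request timer indicates that the channel stopped
 * responding: request hardware logging via SIOSL and recover the
 * adapter through ERP.
 */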
23 static void zfcp_fsf_request_timeout_handler(unsigned long data)
24 {
25 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
26 zfcp_qdio_siosl(adapter);
27 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
28 "fsrth_1");
29 }
30
31 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
32 unsigned long timeout)
33 {
34 fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
35 fsf_req->timer.data = (unsigned long) fsf_req->adapter;
36 fsf_req->timer.expires = jiffies + timeout;
37 add_timer(&fsf_req->timer);
38 }
39
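/*
 * Requests issued on behalf of an ERP action always use a fixed
 * 30 second timeout and notify the ERP code on expiry.
 */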
40 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
41 {
42 BUG_ON(!fsf_req->erp_action);
43 fsf_req->timer.function = zfcp_erp_timeout_handler;
44 fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
45 fsf_req->timer.expires = jiffies + 30 * HZ;
46 add_timer(&fsf_req->timer);
47 }
48
49 /* association between FSF command and FSF QTCB type */
50 static u32 fsf_qtcb_type[] = {
51 [FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND,
52 [FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND,
53 [FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND,
54 [FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND,
55 [FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND,
56 [FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND,
57 [FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND,
58 [FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND,
59 [FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND,
60 [FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
61 [FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND,
62 [FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
63 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
64 };
65
66 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
67 {
68 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
69 "operational because of an unsupported FC class\n");
70 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
71 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
72 }
73
74 /**
75 * zfcp_fsf_req_free - free memory used by fsf request
76 * @req: pointer to struct zfcp_fsf_req
77 */
78 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
79 {
80 if (likely(req->pool)) {
81 if (likely(req->qtcb))
82 mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
83 mempool_free(req, req->pool);
84 return;
85 }
86
87 if (likely(req->qtcb))
88 kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
89 kfree(req);
90 }
91
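/*
 * An unsolicited "port closed" notification only carries the D_ID,
 * so look up the matching remote port and trigger its recovery.
 */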
92 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
93 {
94 unsigned long flags;
95 struct fsf_status_read_buffer *sr_buf = req->data;
96 struct zfcp_adapter *adapter = req->adapter;
97 struct zfcp_port *port;
98 int d_id = ntoh24(sr_buf->d_id);
99
100 read_lock_irqsave(&adapter->port_list_lock, flags);
101 list_for_each_entry(port, &adapter->port_list, list)
102 if (port->d_id == d_id) {
103 zfcp_erp_port_reopen(port, 0, "fssrpc1");
104 break;
105 }
106 read_unlock_irqrestore(&adapter->port_list_lock, flags);
107 }
108
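/*
 * Common handling for all link-down indications: mark the adapter
 * link as unplugged, block the fc_rports, warn about the reported
 * cause (if any) and set the adapter to ERP_FAILED.
 */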
109 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
110 struct fsf_link_down_info *link_down)
111 {
112 struct zfcp_adapter *adapter = req->adapter;
113
114 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
115 return;
116
117 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
118
119 zfcp_scsi_schedule_rports_block(adapter);
120
121 if (!link_down)
122 goto out;
123
124 switch (link_down->error_code) {
125 case FSF_PSQ_LINK_NO_LIGHT:
126 dev_warn(&req->adapter->ccw_device->dev,
127 "There is no light signal from the local "
128 "fibre channel cable\n");
129 break;
130 case FSF_PSQ_LINK_WRAP_PLUG:
131 dev_warn(&req->adapter->ccw_device->dev,
132 "There is a wrap plug instead of a fibre "
133 "channel cable\n");
134 break;
135 case FSF_PSQ_LINK_NO_FCP:
136 dev_warn(&req->adapter->ccw_device->dev,
137 "The adjacent fibre channel node does not "
138 "support FCP\n");
139 break;
140 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
141 dev_warn(&req->adapter->ccw_device->dev,
142 "The FCP device is suspended because of a "
143 "firmware update\n");
144 break;
145 case FSF_PSQ_LINK_INVALID_WWPN:
146 dev_warn(&req->adapter->ccw_device->dev,
147 "The FCP device detected a WWPN that is "
148 "duplicate or not valid\n");
149 break;
150 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
151 dev_warn(&req->adapter->ccw_device->dev,
152 "The fibre channel fabric does not support NPIV\n");
153 break;
154 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
155 dev_warn(&req->adapter->ccw_device->dev,
156 "The FCP adapter cannot support more NPIV ports\n");
157 break;
158 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
159 dev_warn(&req->adapter->ccw_device->dev,
160 "The adjacent switch cannot support "
161 "more NPIV ports\n");
162 break;
163 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
164 dev_warn(&req->adapter->ccw_device->dev,
165 "The FCP adapter could not log in to the "
166 "fibre channel fabric\n");
167 break;
168 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
169 dev_warn(&req->adapter->ccw_device->dev,
170 "The WWPN assignment file on the FCP adapter "
171 "has been damaged\n");
172 break;
173 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
174 dev_warn(&req->adapter->ccw_device->dev,
175 "The mode table on the FCP adapter "
176 "has been damaged\n");
177 break;
178 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
179 dev_warn(&req->adapter->ccw_device->dev,
180 "All NPIV ports on the FCP adapter have "
181 "been assigned\n");
182 break;
183 default:
184 dev_warn(&req->adapter->ccw_device->dev,
185 "The link between the FCP adapter and "
186 "the FC fabric is down\n");
187 }
188 out:
189 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
190 }
191
192 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
193 {
194 struct fsf_status_read_buffer *sr_buf = req->data;
195 struct fsf_link_down_info *ldi =
196 (struct fsf_link_down_info *) &sr_buf->payload;
197
198 switch (sr_buf->status_subtype) {
199 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
200 zfcp_fsf_link_down_info_eval(req, ldi);
201 break;
202 case FSF_STATUS_READ_SUB_FDISC_FAILED:
203 zfcp_fsf_link_down_info_eval(req, ldi);
204 break;
205 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
206 zfcp_fsf_link_down_info_eval(req, NULL);
207 	}
208 }
209
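/*
 * Process one unsolicited status buffer, dispatch by status type,
 * return the buffer to its mempool and schedule a replacement
 * status read request via the adapter work queue.
 */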
210 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
211 {
212 struct zfcp_adapter *adapter = req->adapter;
213 struct fsf_status_read_buffer *sr_buf = req->data;
214
215 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
216 zfcp_dbf_hba_fsf_uss("fssrh_1", req);
217 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
218 zfcp_fsf_req_free(req);
219 return;
220 }
221
222 zfcp_dbf_hba_fsf_uss("fssrh_4", req);
223
224 switch (sr_buf->status_type) {
225 case FSF_STATUS_READ_PORT_CLOSED:
226 zfcp_fsf_status_read_port_closed(req);
227 break;
228 case FSF_STATUS_READ_INCOMING_ELS:
229 zfcp_fc_incoming_els(req);
230 break;
231 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
232 break;
233 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
234 dev_warn(&adapter->ccw_device->dev,
235 "The error threshold for checksum statistics "
236 "has been exceeded\n");
237 zfcp_dbf_hba_bit_err("fssrh_3", req);
238 break;
239 case FSF_STATUS_READ_LINK_DOWN:
240 zfcp_fsf_status_read_link_down(req);
241 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
242 break;
243 case FSF_STATUS_READ_LINK_UP:
244 dev_info(&adapter->ccw_device->dev,
245 "The local link has been restored\n");
246 /* All ports should be marked as ready to run again */
247 zfcp_erp_set_adapter_status(adapter,
248 ZFCP_STATUS_COMMON_RUNNING);
249 zfcp_erp_adapter_reopen(adapter,
250 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
251 ZFCP_STATUS_COMMON_ERP_FAILED,
252 "fssrh_2");
253 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
254
255 break;
256 case FSF_STATUS_READ_NOTIFICATION_LOST:
257 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
258 zfcp_cfdc_adapter_access_changed(adapter);
259 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
260 queue_work(adapter->work_queue, &adapter->scan_work);
261 break;
262 case FSF_STATUS_READ_CFDC_UPDATED:
263 zfcp_cfdc_adapter_access_changed(adapter);
264 break;
265 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
266 adapter->adapter_features = sr_buf->payload.word[0];
267 break;
268 }
269
270 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
271 zfcp_fsf_req_free(req);
272
273 atomic_inc(&adapter->stat_miss);
274 queue_work(adapter->work_queue, &adapter->stat_work);
275 }
276
277 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
278 {
279 switch (req->qtcb->header.fsf_status_qual.word[0]) {
280 case FSF_SQ_FCP_RSP_AVAILABLE:
281 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
282 case FSF_SQ_NO_RETRY_POSSIBLE:
283 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
284 return;
285 case FSF_SQ_COMMAND_ABORTED:
286 break;
287 case FSF_SQ_NO_RECOM:
288 dev_err(&req->adapter->ccw_device->dev,
289 "The FCP adapter reported a problem "
290 "that cannot be recovered\n");
291 zfcp_qdio_siosl(req->adapter);
292 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
293 break;
294 }
295 	/* all non-return cases set FSFREQ_ERROR */
296 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
297 }
298
299 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
300 {
301 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
302 return;
303
304 switch (req->qtcb->header.fsf_status) {
305 case FSF_UNKNOWN_COMMAND:
306 dev_err(&req->adapter->ccw_device->dev,
307 "The FCP adapter does not recognize the command 0x%x\n",
308 req->qtcb->header.fsf_command);
309 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
310 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
311 break;
312 case FSF_ADAPTER_STATUS_AVAILABLE:
313 zfcp_fsf_fsfstatus_qual_eval(req);
314 break;
315 }
316 }
317
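/*
 * Evaluate the QTCB protocol status; anything other than
 * FSF_PROT_GOOD or FSF_PROT_FSF_STATUS_PRESENTED marks the request
 * as failed and usually triggers adapter recovery or shutdown.
 */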
318 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
319 {
320 struct zfcp_adapter *adapter = req->adapter;
321 struct fsf_qtcb *qtcb = req->qtcb;
322 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
323
324 zfcp_dbf_hba_fsf_response(req);
325
326 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
327 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
328 return;
329 }
330
331 switch (qtcb->prefix.prot_status) {
332 case FSF_PROT_GOOD:
333 case FSF_PROT_FSF_STATUS_PRESENTED:
334 return;
335 case FSF_PROT_QTCB_VERSION_ERROR:
336 dev_err(&adapter->ccw_device->dev,
337 "QTCB version 0x%x not supported by FCP adapter "
338 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
339 psq->word[0], psq->word[1]);
340 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
341 break;
342 case FSF_PROT_ERROR_STATE:
343 case FSF_PROT_SEQ_NUMB_ERROR:
344 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
345 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
346 break;
347 case FSF_PROT_UNSUPP_QTCB_TYPE:
348 dev_err(&adapter->ccw_device->dev,
349 "The QTCB type is not supported by the FCP adapter\n");
350 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
351 break;
352 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
353 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
354 &adapter->status);
355 break;
356 case FSF_PROT_DUPLICATE_REQUEST_ID:
357 dev_err(&adapter->ccw_device->dev,
358 "0x%Lx is an ambiguous request identifier\n",
359 (unsigned long long)qtcb->bottom.support.req_handle);
360 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
361 break;
362 case FSF_PROT_LINK_DOWN:
363 zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
364 /* go through reopen to flush pending requests */
365 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
366 break;
367 case FSF_PROT_REEST_QUEUE:
368 /* All ports should be marked as ready to run again */
369 zfcp_erp_set_adapter_status(adapter,
370 ZFCP_STATUS_COMMON_RUNNING);
371 zfcp_erp_adapter_reopen(adapter,
372 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
373 ZFCP_STATUS_COMMON_ERP_FAILED,
374 "fspse_8");
375 break;
376 default:
377 dev_err(&adapter->ccw_device->dev,
378 "0x%x is not a valid transfer protocol status\n",
379 qtcb->prefix.prot_status);
380 zfcp_qdio_siosl(adapter);
381 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
382 }
383 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
384 }
385
386 /**
387 * zfcp_fsf_req_complete - process completion of a FSF request
388 * @req: The FSF request that has been completed.
389 *
390 * When a request has been completed either from the FCP adapter,
391 * or it has been dismissed due to a queue shutdown, this function
392 * is called to process the completion status and trigger further
393 * events related to the FSF request.
394 */
395 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
396 {
397 if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
398 zfcp_fsf_status_read_handler(req);
399 return;
400 }
401
402 del_timer(&req->timer);
403 zfcp_fsf_protstatus_eval(req);
404 zfcp_fsf_fsfstatus_eval(req);
405 req->handler(req);
406
407 if (req->erp_action)
408 zfcp_erp_notify(req->erp_action, 0);
409
410 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
411 zfcp_fsf_req_free(req);
412 else
413 complete(&req->completion);
414 }
415
416 /**
417 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
418 * @adapter: pointer to struct zfcp_adapter
419 *
420 * Never ever call this without shutting down the adapter first.
421 * Otherwise the adapter would continue using and corrupting s390 storage.
422 * Included BUG_ON() call to ensure this is done.
423 * ERP is supposed to be the only user of this function.
424 */
425 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
426 {
427 struct zfcp_fsf_req *req, *tmp;
428 LIST_HEAD(remove_queue);
429
430 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
431 zfcp_reqlist_move(adapter->req_list, &remove_queue);
432
433 list_for_each_entry_safe(req, tmp, &remove_queue, list) {
434 list_del(&req->list);
435 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
436 zfcp_fsf_req_complete(req);
437 }
438 }
439
440 #define ZFCP_FSF_PORTSPEED_1GBIT (1 << 0)
441 #define ZFCP_FSF_PORTSPEED_2GBIT (1 << 1)
442 #define ZFCP_FSF_PORTSPEED_4GBIT (1 << 2)
443 #define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3)
444 #define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4)
445 #define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5)
446 #define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
447
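/* translate the FSF port speed bit mask into FC transport FC_PORTSPEED_* bits */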
448 static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
449 {
450 u32 fdmi_speed = 0;
451 if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
452 fdmi_speed |= FC_PORTSPEED_1GBIT;
453 if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
454 fdmi_speed |= FC_PORTSPEED_2GBIT;
455 if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
456 fdmi_speed |= FC_PORTSPEED_4GBIT;
457 if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
458 fdmi_speed |= FC_PORTSPEED_10GBIT;
459 if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
460 fdmi_speed |= FC_PORTSPEED_8GBIT;
461 if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
462 fdmi_speed |= FC_PORTSPEED_16GBIT;
463 if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
464 fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
465 return fdmi_speed;
466 }
467
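/*
 * Copy the exchange config results into the fc_host attributes and
 * the adapter structure; the N_Port and PLOGI payloads are stored
 * without the 4-byte ELS command code, hence the pointer adjustment.
 */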
468 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
469 {
470 struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
471 struct zfcp_adapter *adapter = req->adapter;
472 struct Scsi_Host *shost = adapter->scsi_host;
473 struct fc_els_flogi *nsp, *plogi;
474
475 /* adjust pointers for missing command code */
476 nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
477 - sizeof(u32));
478 plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
479 - sizeof(u32));
480
481 if (req->data)
482 memcpy(req->data, bottom, sizeof(*bottom));
483
484 fc_host_port_name(shost) = nsp->fl_wwpn;
485 fc_host_node_name(shost) = nsp->fl_wwnn;
486 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
487
488 adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
489 adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
490 (u16)FSF_STATUS_READS_RECOM);
491
492 if (fc_host_permanent_port_name(shost) == -1)
493 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
494
495 zfcp_scsi_set_prot(adapter);
496
497 /* no error return above here, otherwise must fix call chains */
498 /* do not evaluate invalid fields */
499 if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
500 return 0;
501
502 fc_host_port_id(shost) = ntoh24(bottom->s_id);
503 fc_host_speed(shost) =
504 zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
505
506 adapter->hydra_version = bottom->adapter_type;
507
508 switch (bottom->fc_topology) {
509 case FSF_TOPO_P2P:
510 adapter->peer_d_id = ntoh24(bottom->peer_d_id);
511 adapter->peer_wwpn = plogi->fl_wwpn;
512 adapter->peer_wwnn = plogi->fl_wwnn;
513 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
514 break;
515 case FSF_TOPO_FABRIC:
516 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
517 break;
518 case FSF_TOPO_AL:
519 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
520 /* fall through */
521 default:
522 dev_err(&adapter->ccw_device->dev,
523 "Unknown or unsupported arbitrated loop "
524 "fibre channel topology detected\n");
525 zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
526 return -EIO;
527 }
528
529 return 0;
530 }
531
532 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
533 {
534 struct zfcp_adapter *adapter = req->adapter;
535 struct fsf_qtcb *qtcb = req->qtcb;
536 struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
537 struct Scsi_Host *shost = adapter->scsi_host;
538
539 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
540 return;
541
542 adapter->fsf_lic_version = bottom->lic_version;
543 adapter->adapter_features = bottom->adapter_features;
544 adapter->connection_features = bottom->connection_features;
545 adapter->peer_wwpn = 0;
546 adapter->peer_wwnn = 0;
547 adapter->peer_d_id = 0;
548
549 switch (qtcb->header.fsf_status) {
550 case FSF_GOOD:
551 if (zfcp_fsf_exchange_config_evaluate(req))
552 return;
553
554 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
555 dev_err(&adapter->ccw_device->dev,
556 "FCP adapter maximum QTCB size (%d bytes) "
557 "is too small\n",
558 bottom->max_qtcb_size);
559 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
560 return;
561 }
562 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
563 &adapter->status);
564 break;
565 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
566 fc_host_node_name(shost) = 0;
567 fc_host_port_name(shost) = 0;
568 fc_host_port_id(shost) = 0;
569 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
570 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
571 adapter->hydra_version = 0;
572
573 /* avoids adapter shutdown to be able to recognize
574 * events such as LINK UP */
575 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
576 &adapter->status);
577 zfcp_fsf_link_down_info_eval(req,
578 &qtcb->header.fsf_status_qual.link_down_info);
579 if (zfcp_fsf_exchange_config_evaluate(req))
580 return;
581 break;
582 default:
583 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
584 return;
585 }
586
587 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
588 adapter->hardware_version = bottom->hardware_version;
589 memcpy(fc_host_serial_number(shost), bottom->serial_number,
590 min(FC_SERIAL_NUMBER_SIZE, 17));
591 EBCASC(fc_host_serial_number(shost),
592 min(FC_SERIAL_NUMBER_SIZE, 17));
593 }
594
595 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
596 dev_err(&adapter->ccw_device->dev,
597 "The FCP adapter only supports newer "
598 "control block versions\n");
599 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
600 return;
601 }
602 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
603 dev_err(&adapter->ccw_device->dev,
604 "The FCP adapter only supports older "
605 "control block versions\n");
606 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
607 }
608 }
609
610 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
611 {
612 struct zfcp_adapter *adapter = req->adapter;
613 struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
614 struct Scsi_Host *shost = adapter->scsi_host;
615
616 if (req->data)
617 memcpy(req->data, bottom, sizeof(*bottom));
618
619 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
620 fc_host_permanent_port_name(shost) = bottom->wwpn;
621 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
622 } else
623 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
624 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
625 fc_host_supported_speeds(shost) =
626 zfcp_fsf_convert_portspeed(bottom->supported_speed);
627 memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
628 FC_FC4_LIST_SIZE);
629 memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
630 FC_FC4_LIST_SIZE);
631 }
632
633 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
634 {
635 struct fsf_qtcb *qtcb = req->qtcb;
636
637 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
638 return;
639
640 switch (qtcb->header.fsf_status) {
641 case FSF_GOOD:
642 zfcp_fsf_exchange_port_evaluate(req);
643 break;
644 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
645 zfcp_fsf_exchange_port_evaluate(req);
646 zfcp_fsf_link_down_info_eval(req,
647 &qtcb->header.fsf_status_qual.link_down_info);
648 break;
649 }
650 }
651
652 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
653 {
654 struct zfcp_fsf_req *req;
655
656 if (likely(pool))
657 req = mempool_alloc(pool, GFP_ATOMIC);
658 else
659 req = kmalloc(sizeof(*req), GFP_ATOMIC);
660
661 if (unlikely(!req))
662 return NULL;
663
664 memset(req, 0, sizeof(*req));
665 req->pool = pool;
666 return req;
667 }
668
669 static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
670 {
671 struct fsf_qtcb *qtcb;
672
673 if (likely(pool))
674 qtcb = mempool_alloc(pool, GFP_ATOMIC);
675 else
676 qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
677
678 if (unlikely(!qtcb))
679 return NULL;
680
681 memset(qtcb, 0, sizeof(*qtcb));
682 return qtcb;
683 }
684
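/*
 * Allocate an FSF request and, except for unsolicited status reads,
 * its QTCB; assign the adapter's current request id and sequence
 * number and initialize the associated QDIO request.
 */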
685 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
686 u32 fsf_cmd, u8 sbtype,
687 mempool_t *pool)
688 {
689 struct zfcp_adapter *adapter = qdio->adapter;
690 struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
691
692 if (unlikely(!req))
693 return ERR_PTR(-ENOMEM);
694
695 if (adapter->req_no == 0)
696 adapter->req_no++;
697
698 INIT_LIST_HEAD(&req->list);
699 init_timer(&req->timer);
700 init_completion(&req->completion);
701
702 req->adapter = adapter;
703 req->fsf_command = fsf_cmd;
704 req->req_id = adapter->req_no;
705
706 if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
707 if (likely(pool))
708 req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
709 else
710 req->qtcb = zfcp_qtcb_alloc(NULL);
711
712 if (unlikely(!req->qtcb)) {
713 zfcp_fsf_req_free(req);
714 return ERR_PTR(-ENOMEM);
715 }
716
717 req->seq_no = adapter->fsf_req_seq_no;
718 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
719 req->qtcb->prefix.req_id = req->req_id;
720 req->qtcb->prefix.ulp_info = 26;
721 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
722 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
723 req->qtcb->header.req_handle = req->req_id;
724 req->qtcb->header.fsf_command = req->fsf_command;
725 }
726
727 zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
728 req->qtcb, sizeof(struct fsf_qtcb));
729
730 return req;
731 }
732
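/*
 * Register the request in the adapter's request list, timestamp it
 * and hand it to QDIO; on failure remove it from the list again and
 * reopen the adapter.
 */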
733 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
734 {
735 struct zfcp_adapter *adapter = req->adapter;
736 struct zfcp_qdio *qdio = adapter->qdio;
737 int with_qtcb = (req->qtcb != NULL);
738 int req_id = req->req_id;
739
740 zfcp_reqlist_add(adapter->req_list, req);
741
742 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
743 req->issued = get_clock();
744 if (zfcp_qdio_send(qdio, &req->qdio_req)) {
745 del_timer(&req->timer);
746 /* lookup request again, list might have changed */
747 zfcp_reqlist_find_rm(adapter->req_list, req_id);
748 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
749 return -EIO;
750 }
751
752 /* Don't increase for unsolicited status */
753 if (with_qtcb)
754 adapter->fsf_req_seq_no++;
755 adapter->req_no++;
756
757 return 0;
758 }
759
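/*
 * Sketch of the call pattern shared by the request constructors below
 * (error handling abbreviated):
 *
 *	spin_lock_irq(&qdio->req_q_lock);
 *	if (zfcp_qdio_sbal_get(qdio))
 *		goto out;
 *	req = zfcp_fsf_req_create(qdio, fsf_cmd, sbtype, pool);
 *	... set req->handler, req->data, fill QTCB and SBALEs ...
 *	retval = zfcp_fsf_req_send(req);
 * out:
 *	spin_unlock_irq(&qdio->req_q_lock);
 */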
760 /**
761 * zfcp_fsf_status_read - send status read request
762 * @qdio: pointer to struct zfcp_qdio
764 * Returns: 0 on success, ERROR otherwise
765 */
766 int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
767 {
768 struct zfcp_adapter *adapter = qdio->adapter;
769 struct zfcp_fsf_req *req;
770 struct fsf_status_read_buffer *sr_buf;
771 struct page *page;
772 int retval = -EIO;
773
774 spin_lock_irq(&qdio->req_q_lock);
775 if (zfcp_qdio_sbal_get(qdio))
776 goto out;
777
778 req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, 0,
779 adapter->pool.status_read_req);
780 if (IS_ERR(req)) {
781 retval = PTR_ERR(req);
782 goto out;
783 }
784
785 page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
786 if (!page) {
787 retval = -ENOMEM;
788 goto failed_buf;
789 }
790 sr_buf = page_address(page);
791 memset(sr_buf, 0, sizeof(*sr_buf));
792 req->data = sr_buf;
793
794 zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
795 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
796
797 retval = zfcp_fsf_req_send(req);
798 if (retval)
799 goto failed_req_send;
800
801 goto out;
802
803 failed_req_send:
804 req->data = NULL;
805 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
806 failed_buf:
807 zfcp_dbf_hba_fsf_uss("fssr__1", req);
808 zfcp_fsf_req_free(req);
809 out:
810 spin_unlock_irq(&qdio->req_q_lock);
811 return retval;
812 }
813
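/*
 * Translate the FSF status of an abort request into the
 * ABORTSUCCEEDED, ABORTNOTNEEDED or ERROR request flags and trigger
 * adapter, port or LUN recovery for stale or boxed handles.
 */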
814 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
815 {
816 struct scsi_device *sdev = req->data;
817 struct zfcp_scsi_dev *zfcp_sdev;
818 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
819
820 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
821 return;
822
823 zfcp_sdev = sdev_to_zfcp(sdev);
824
825 switch (req->qtcb->header.fsf_status) {
826 case FSF_PORT_HANDLE_NOT_VALID:
827 if (fsq->word[0] == fsq->word[1]) {
828 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
829 "fsafch1");
830 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
831 }
832 break;
833 case FSF_LUN_HANDLE_NOT_VALID:
834 if (fsq->word[0] == fsq->word[1]) {
835 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
836 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
837 }
838 break;
839 case FSF_FCP_COMMAND_DOES_NOT_EXIST:
840 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
841 break;
842 case FSF_PORT_BOXED:
843 zfcp_erp_set_port_status(zfcp_sdev->port,
844 ZFCP_STATUS_COMMON_ACCESS_BOXED);
845 zfcp_erp_port_reopen(zfcp_sdev->port,
846 ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
847 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
848 break;
849 case FSF_LUN_BOXED:
850 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
851 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
852 "fsafch4");
853 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
854 break;
855 case FSF_ADAPTER_STATUS_AVAILABLE:
856 switch (fsq->word[0]) {
857 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
858 zfcp_fc_test_link(zfcp_sdev->port);
859 /* fall through */
860 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
861 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
862 break;
863 }
864 break;
865 case FSF_GOOD:
866 req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
867 break;
868 }
869 }
870
871 /**
872 * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
873 * @scmnd: The SCSI command to abort
874 * Returns: pointer to struct zfcp_fsf_req
875 */
876
877 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
878 {
879 struct zfcp_fsf_req *req = NULL;
880 struct scsi_device *sdev = scmnd->device;
881 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
882 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
883 unsigned long old_req_id = (unsigned long) scmnd->host_scribble;
884
885 spin_lock_irq(&qdio->req_q_lock);
886 if (zfcp_qdio_sbal_get(qdio))
887 goto out;
888 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
889 SBAL_SFLAGS0_TYPE_READ,
890 qdio->adapter->pool.scsi_abort);
891 if (IS_ERR(req)) {
892 req = NULL;
893 goto out;
894 }
895
896 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
897 ZFCP_STATUS_COMMON_UNBLOCKED)))
898 goto out_error_free;
899
900 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
901
902 req->data = sdev;
903 req->handler = zfcp_fsf_abort_fcp_command_handler;
904 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
905 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
906 req->qtcb->bottom.support.req_handle = (u64) old_req_id;
907
908 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
909 if (!zfcp_fsf_req_send(req))
910 goto out;
911
912 out_error_free:
913 zfcp_fsf_req_free(req);
914 req = NULL;
915 out:
916 spin_unlock_irq(&qdio->req_q_lock);
917 return req;
918 }
919
920 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
921 {
922 struct zfcp_adapter *adapter = req->adapter;
923 struct zfcp_fsf_ct_els *ct = req->data;
924 struct fsf_qtcb_header *header = &req->qtcb->header;
925
926 ct->status = -EINVAL;
927
928 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
929 goto skip_fsfstatus;
930
931 switch (header->fsf_status) {
932 case FSF_GOOD:
933 zfcp_dbf_san_res("fsscth2", req);
934 ct->status = 0;
935 break;
936 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
937 zfcp_fsf_class_not_supp(req);
938 break;
939 case FSF_ADAPTER_STATUS_AVAILABLE:
940 switch (header->fsf_status_qual.word[0]){
941 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
942 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
943 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
944 break;
945 }
946 break;
947 case FSF_ACCESS_DENIED:
948 break;
949 case FSF_PORT_BOXED:
950 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
951 break;
952 case FSF_PORT_HANDLE_NOT_VALID:
953 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
954 /* fall through */
955 case FSF_GENERIC_COMMAND_REJECTED:
956 case FSF_PAYLOAD_SIZE_MISMATCH:
957 case FSF_REQUEST_SIZE_TOO_LARGE:
958 case FSF_RESPONSE_SIZE_TOO_LARGE:
959 case FSF_SBAL_MISMATCH:
960 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
961 break;
962 }
963
964 skip_fsfstatus:
965 if (ct->handler)
966 ct->handler(ct->handler_data);
967 }
968
969 static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
970 struct zfcp_qdio_req *q_req,
971 struct scatterlist *sg_req,
972 struct scatterlist *sg_resp)
973 {
974 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
975 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
976 zfcp_qdio_set_sbale_last(qdio, q_req);
977 }
978
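/*
 * Map the CT/ELS request and response scatterlists onto SBALs:
 * use the data-division path when multi-buffer mode is active,
 * a single unchained SBAL when both buffers fit, and chained SBALs
 * only if the adapter advertises FSF_FEATURE_ELS_CT_CHAINED_SBALS.
 */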
979 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
980 struct scatterlist *sg_req,
981 struct scatterlist *sg_resp)
982 {
983 struct zfcp_adapter *adapter = req->adapter;
984 struct zfcp_qdio *qdio = adapter->qdio;
985 struct fsf_qtcb *qtcb = req->qtcb;
986 u32 feat = adapter->adapter_features;
987
988 if (zfcp_adapter_multi_buffer_active(adapter)) {
989 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
990 return -EIO;
991 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
992 return -EIO;
993
994 zfcp_qdio_set_data_div(qdio, &req->qdio_req,
995 zfcp_qdio_sbale_count(sg_req));
996 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
997 zfcp_qdio_set_scount(qdio, &req->qdio_req);
998 return 0;
999 }
1000
1001 /* use single, unchained SBAL if it can hold the request */
1002 if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
1003 zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
1004 sg_req, sg_resp);
1005 return 0;
1006 }
1007
1008 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
1009 return -EOPNOTSUPP;
1010
1011 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
1012 return -EIO;
1013
1014 qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);
1015
1016 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1017 zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);
1018
1019 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
1020 return -EIO;
1021
1022 qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);
1023
1024 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1025
1026 return 0;
1027 }
1028
1029 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1030 struct scatterlist *sg_req,
1031 struct scatterlist *sg_resp,
1032 unsigned int timeout)
1033 {
1034 int ret;
1035
1036 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
1037 if (ret)
1038 return ret;
1039
1040 /* common settings for ct/gs and els requests */
1041 if (timeout > 255)
1042 timeout = 255; /* max value accepted by hardware */
1043 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1044 req->qtcb->bottom.support.timeout = timeout;
1045 zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
1046
1047 return 0;
1048 }
1049
1050 /**
1051 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1052 * @wka_port: pointer to struct zfcp_fc_wka_port the request is sent to
 * @ct: pointer to struct zfcp_fsf_ct_els with data for the CT request
1053 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
 * @timeout: timeout in seconds for the channel, also used (plus 10s) for the request timer
1054 */
1055 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1056 struct zfcp_fsf_ct_els *ct, mempool_t *pool,
1057 unsigned int timeout)
1058 {
1059 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1060 struct zfcp_fsf_req *req;
1061 int ret = -EIO;
1062
1063 spin_lock_irq(&qdio->req_q_lock);
1064 if (zfcp_qdio_sbal_get(qdio))
1065 goto out;
1066
1067 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
1068 SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
1069
1070 if (IS_ERR(req)) {
1071 ret = PTR_ERR(req);
1072 goto out;
1073 }
1074
1075 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1076 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
1077 if (ret)
1078 goto failed_send;
1079
1080 req->handler = zfcp_fsf_send_ct_handler;
1081 req->qtcb->header.port_handle = wka_port->handle;
1082 req->data = ct;
1083
1084 zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
1085
1086 ret = zfcp_fsf_req_send(req);
1087 if (ret)
1088 goto failed_send;
1089
1090 goto out;
1091
1092 failed_send:
1093 zfcp_fsf_req_free(req);
1094 out:
1095 spin_unlock_irq(&qdio->req_q_lock);
1096 return ret;
1097 }
1098
1099 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1100 {
1101 struct zfcp_fsf_ct_els *send_els = req->data;
1102 struct zfcp_port *port = send_els->port;
1103 struct fsf_qtcb_header *header = &req->qtcb->header;
1104
1105 send_els->status = -EINVAL;
1106
1107 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1108 goto skip_fsfstatus;
1109
1110 switch (header->fsf_status) {
1111 case FSF_GOOD:
1112 zfcp_dbf_san_res("fsselh1", req);
1113 send_els->status = 0;
1114 break;
1115 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1116 zfcp_fsf_class_not_supp(req);
1117 break;
1118 case FSF_ADAPTER_STATUS_AVAILABLE:
1119 switch (header->fsf_status_qual.word[0]){
1120 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1121 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1122 case FSF_SQ_RETRY_IF_POSSIBLE:
1123 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1124 break;
1125 }
1126 break;
1127 case FSF_ELS_COMMAND_REJECTED:
1128 case FSF_PAYLOAD_SIZE_MISMATCH:
1129 case FSF_REQUEST_SIZE_TOO_LARGE:
1130 case FSF_RESPONSE_SIZE_TOO_LARGE:
1131 break;
1132 case FSF_ACCESS_DENIED:
1133 if (port) {
1134 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1135 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1136 }
1137 break;
1138 case FSF_SBAL_MISMATCH:
1139 /* should never occur, avoided in zfcp_fsf_send_els */
1140 /* fall through */
1141 default:
1142 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1143 break;
1144 }
1145 skip_fsfstatus:
1146 if (send_els->handler)
1147 send_els->handler(send_els->handler_data);
1148 }
1149
1150 /**
1151 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1152 * @adapter: pointer to struct zfcp_adapter
 * @d_id: N_Port_ID of the destination of the ELS
 * @els: pointer to struct zfcp_fsf_ct_els with data for the command
 * @timeout: timeout in seconds for the channel, also used (plus 10s) for the request timer
1153 */
1154 int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1155 struct zfcp_fsf_ct_els *els, unsigned int timeout)
1156 {
1157 struct zfcp_fsf_req *req;
1158 struct zfcp_qdio *qdio = adapter->qdio;
1159 int ret = -EIO;
1160
1161 spin_lock_irq(&qdio->req_q_lock);
1162 if (zfcp_qdio_sbal_get(qdio))
1163 goto out;
1164
1165 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
1166 SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
1167
1168 if (IS_ERR(req)) {
1169 ret = PTR_ERR(req);
1170 goto out;
1171 }
1172
1173 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1174
1175 if (!zfcp_adapter_multi_buffer_active(adapter))
1176 zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
1177
1178 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
1179
1180 if (ret)
1181 goto failed_send;
1182
1183 hton24(req->qtcb->bottom.support.d_id, d_id);
1184 req->handler = zfcp_fsf_send_els_handler;
1185 req->data = els;
1186
1187 zfcp_dbf_san_req("fssels1", req, d_id);
1188
1189 ret = zfcp_fsf_req_send(req);
1190 if (ret)
1191 goto failed_send;
1192
1193 goto out;
1194
1195 failed_send:
1196 zfcp_fsf_req_free(req);
1197 out:
1198 spin_unlock_irq(&qdio->req_q_lock);
1199 return ret;
1200 }
1201
1202 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1203 {
1204 struct zfcp_fsf_req *req;
1205 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1206 int retval = -EIO;
1207
1208 spin_lock_irq(&qdio->req_q_lock);
1209 if (zfcp_qdio_sbal_get(qdio))
1210 goto out;
1211
1212 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1213 SBAL_SFLAGS0_TYPE_READ,
1214 qdio->adapter->pool.erp_req);
1215
1216 if (IS_ERR(req)) {
1217 retval = PTR_ERR(req);
1218 goto out;
1219 }
1220
1221 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1222 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1223
1224 req->qtcb->bottom.config.feature_selection =
1225 FSF_FEATURE_CFDC |
1226 FSF_FEATURE_LUN_SHARING |
1227 FSF_FEATURE_NOTIFICATION_LOST |
1228 FSF_FEATURE_UPDATE_ALERT;
1229 req->erp_action = erp_action;
1230 req->handler = zfcp_fsf_exchange_config_data_handler;
1231 erp_action->fsf_req_id = req->req_id;
1232
1233 zfcp_fsf_start_erp_timer(req);
1234 retval = zfcp_fsf_req_send(req);
1235 if (retval) {
1236 zfcp_fsf_req_free(req);
1237 erp_action->fsf_req_id = 0;
1238 }
1239 out:
1240 spin_unlock_irq(&qdio->req_q_lock);
1241 return retval;
1242 }
1243
1244 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1245 struct fsf_qtcb_bottom_config *data)
1246 {
1247 struct zfcp_fsf_req *req = NULL;
1248 int retval = -EIO;
1249
1250 spin_lock_irq(&qdio->req_q_lock);
1251 if (zfcp_qdio_sbal_get(qdio))
1252 goto out_unlock;
1253
1254 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1255 SBAL_SFLAGS0_TYPE_READ, NULL);
1256
1257 if (IS_ERR(req)) {
1258 retval = PTR_ERR(req);
1259 goto out_unlock;
1260 }
1261
1262 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1263 req->handler = zfcp_fsf_exchange_config_data_handler;
1264
1265 req->qtcb->bottom.config.feature_selection =
1266 FSF_FEATURE_CFDC |
1267 FSF_FEATURE_LUN_SHARING |
1268 FSF_FEATURE_NOTIFICATION_LOST |
1269 FSF_FEATURE_UPDATE_ALERT;
1270
1271 if (data)
1272 req->data = data;
1273
1274 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1275 retval = zfcp_fsf_req_send(req);
1276 spin_unlock_irq(&qdio->req_q_lock);
1277 if (!retval)
1278 wait_for_completion(&req->completion);
1279
1280 zfcp_fsf_req_free(req);
1281 return retval;
1282
1283 out_unlock:
1284 spin_unlock_irq(&qdio->req_q_lock);
1285 return retval;
1286 }
1287
1288 /**
1289 * zfcp_fsf_exchange_port_data - request information about local port
1290 * @erp_action: ERP action for the adapter for which port data is requested
1291 * Returns: 0 on success, error otherwise
1292 */
1293 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1294 {
1295 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1296 struct zfcp_fsf_req *req;
1297 int retval = -EIO;
1298
1299 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1300 return -EOPNOTSUPP;
1301
1302 spin_lock_irq(&qdio->req_q_lock);
1303 if (zfcp_qdio_sbal_get(qdio))
1304 goto out;
1305
1306 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1307 SBAL_SFLAGS0_TYPE_READ,
1308 qdio->adapter->pool.erp_req);
1309
1310 if (IS_ERR(req)) {
1311 retval = PTR_ERR(req);
1312 goto out;
1313 }
1314
1315 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1316 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1317
1318 req->handler = zfcp_fsf_exchange_port_data_handler;
1319 req->erp_action = erp_action;
1320 erp_action->fsf_req_id = req->req_id;
1321
1322 zfcp_fsf_start_erp_timer(req);
1323 retval = zfcp_fsf_req_send(req);
1324 if (retval) {
1325 zfcp_fsf_req_free(req);
1326 erp_action->fsf_req_id = 0;
1327 }
1328 out:
1329 spin_unlock_irq(&qdio->req_q_lock);
1330 return retval;
1331 }
1332
1333 /**
1334 * zfcp_fsf_exchange_port_data_sync - request information about local port
1335 * @qdio: pointer to struct zfcp_qdio
1336 * @data: pointer to struct fsf_qtcb_bottom_port
1337 * Returns: 0 on success, error otherwise
1338 */
1339 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1340 struct fsf_qtcb_bottom_port *data)
1341 {
1342 struct zfcp_fsf_req *req = NULL;
1343 int retval = -EIO;
1344
1345 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1346 return -EOPNOTSUPP;
1347
1348 spin_lock_irq(&qdio->req_q_lock);
1349 if (zfcp_qdio_sbal_get(qdio))
1350 goto out_unlock;
1351
1352 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1353 SBAL_SFLAGS0_TYPE_READ, NULL);
1354
1355 if (IS_ERR(req)) {
1356 retval = PTR_ERR(req);
1357 goto out_unlock;
1358 }
1359
1360 if (data)
1361 req->data = data;
1362
1363 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1364
1365 req->handler = zfcp_fsf_exchange_port_data_handler;
1366 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1367 retval = zfcp_fsf_req_send(req);
1368 spin_unlock_irq(&qdio->req_q_lock);
1369
1370 if (!retval)
1371 wait_for_completion(&req->completion);
1372
1373 zfcp_fsf_req_free(req);
1374
1375 return retval;
1376
1377 out_unlock:
1378 spin_unlock_irq(&qdio->req_q_lock);
1379 return retval;
1380 }
1381
1382 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1383 {
1384 struct zfcp_port *port = req->data;
1385 struct fsf_qtcb_header *header = &req->qtcb->header;
1386 struct fc_els_flogi *plogi;
1387
1388 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1389 goto out;
1390
1391 switch (header->fsf_status) {
1392 case FSF_PORT_ALREADY_OPEN:
1393 break;
1394 case FSF_ACCESS_DENIED:
1395 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1396 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1397 break;
1398 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1399 dev_warn(&req->adapter->ccw_device->dev,
1400 "Not enough FCP adapter resources to open "
1401 "remote port 0x%016Lx\n",
1402 (unsigned long long)port->wwpn);
1403 zfcp_erp_set_port_status(port,
1404 ZFCP_STATUS_COMMON_ERP_FAILED);
1405 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1406 break;
1407 case FSF_ADAPTER_STATUS_AVAILABLE:
1408 switch (header->fsf_status_qual.word[0]) {
1409 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1410 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1411 case FSF_SQ_NO_RETRY_POSSIBLE:
1412 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1413 break;
1414 }
1415 break;
1416 case FSF_GOOD:
1417 port->handle = header->port_handle;
1418 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
1419 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1420 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1421 ZFCP_STATUS_COMMON_ACCESS_BOXED,
1422 &port->status);
1423 /* check whether D_ID has changed during open */
1424 /*
1425 * FIXME: This check is not airtight, as the FCP channel does
1426 * not monitor closures of target port connections caused on
1427 * the remote side. Thus, they might miss out on invalidating
1428 * locally cached WWPNs (and other N_Port parameters) of gone
1429 * target ports. So, our heroic attempt to make things safe
1430 * could be undermined by 'open port' response data tagged with
1431 * obsolete WWPNs. Another reason to monitor potential
1432 * connection closures ourself at least (by interpreting
1433 * incoming ELS' and unsolicited status). It just crosses my
1434 * mind that one should be able to cross-check by means of
1435 * another GID_PN straight after a port has been opened.
1436 * Alternately, an ADISC/PDISC ELS should suffice, as well.
1437 */
1438 plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
1439 if (req->qtcb->bottom.support.els1_length >=
1440 FSF_PLOGI_MIN_LEN)
1441 zfcp_fc_plogi_evaluate(port, plogi);
1442 break;
1443 case FSF_UNKNOWN_OP_SUBTYPE:
1444 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1445 break;
1446 }
1447
1448 out:
1449 put_device(&port->dev);
1450 }
1451
1452 /**
1453 * zfcp_fsf_open_port - create and send open port request
1454 * @erp_action: pointer to struct zfcp_erp_action
1455 * Returns: 0 on success, error otherwise
1456 */
1457 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1458 {
1459 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1460 struct zfcp_port *port = erp_action->port;
1461 struct zfcp_fsf_req *req;
1462 int retval = -EIO;
1463
1464 spin_lock_irq(&qdio->req_q_lock);
1465 if (zfcp_qdio_sbal_get(qdio))
1466 goto out;
1467
1468 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1469 SBAL_SFLAGS0_TYPE_READ,
1470 qdio->adapter->pool.erp_req);
1471
1472 if (IS_ERR(req)) {
1473 retval = PTR_ERR(req);
1474 goto out;
1475 }
1476
1477 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1478 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1479
1480 req->handler = zfcp_fsf_open_port_handler;
1481 hton24(req->qtcb->bottom.support.d_id, port->d_id);
1482 req->data = port;
1483 req->erp_action = erp_action;
1484 erp_action->fsf_req_id = req->req_id;
1485 get_device(&port->dev);
1486
1487 zfcp_fsf_start_erp_timer(req);
1488 retval = zfcp_fsf_req_send(req);
1489 if (retval) {
1490 zfcp_fsf_req_free(req);
1491 erp_action->fsf_req_id = 0;
1492 put_device(&port->dev);
1493 }
1494 out:
1495 spin_unlock_irq(&qdio->req_q_lock);
1496 return retval;
1497 }
1498
1499 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1500 {
1501 struct zfcp_port *port = req->data;
1502
1503 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1504 return;
1505
1506 switch (req->qtcb->header.fsf_status) {
1507 case FSF_PORT_HANDLE_NOT_VALID:
1508 zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
1509 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1510 break;
1511 case FSF_ADAPTER_STATUS_AVAILABLE:
1512 break;
1513 case FSF_GOOD:
1514 zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
1515 break;
1516 }
1517 }
1518
1519 /**
1520 * zfcp_fsf_close_port - create and send close port request
1521 * @erp_action: pointer to struct zfcp_erp_action
1522 * Returns: 0 on success, error otherwise
1523 */
1524 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1525 {
1526 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1527 struct zfcp_fsf_req *req;
1528 int retval = -EIO;
1529
1530 spin_lock_irq(&qdio->req_q_lock);
1531 if (zfcp_qdio_sbal_get(qdio))
1532 goto out;
1533
1534 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1535 SBAL_SFLAGS0_TYPE_READ,
1536 qdio->adapter->pool.erp_req);
1537
1538 if (IS_ERR(req)) {
1539 retval = PTR_ERR(req);
1540 goto out;
1541 }
1542
1543 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1544 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1545
1546 req->handler = zfcp_fsf_close_port_handler;
1547 req->data = erp_action->port;
1548 req->erp_action = erp_action;
1549 req->qtcb->header.port_handle = erp_action->port->handle;
1550 erp_action->fsf_req_id = req->req_id;
1551
1552 zfcp_fsf_start_erp_timer(req);
1553 retval = zfcp_fsf_req_send(req);
1554 if (retval) {
1555 zfcp_fsf_req_free(req);
1556 erp_action->fsf_req_id = 0;
1557 }
1558 out:
1559 spin_unlock_irq(&qdio->req_q_lock);
1560 return retval;
1561 }
1562
1563 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1564 {
1565 struct zfcp_fc_wka_port *wka_port = req->data;
1566 struct fsf_qtcb_header *header = &req->qtcb->header;
1567
1568 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1569 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1570 goto out;
1571 }
1572
1573 switch (header->fsf_status) {
1574 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1575 dev_warn(&req->adapter->ccw_device->dev,
1576 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1577 /* fall through */
1578 case FSF_ADAPTER_STATUS_AVAILABLE:
1579 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1580 /* fall through */
1581 case FSF_ACCESS_DENIED:
1582 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1583 break;
1584 case FSF_GOOD:
1585 wka_port->handle = header->port_handle;
1586 /* fall through */
1587 case FSF_PORT_ALREADY_OPEN:
1588 wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
1589 }
1590 out:
1591 wake_up(&wka_port->completion_wq);
1592 }
1593
1594 /**
1595 * zfcp_fsf_open_wka_port - create and send open wka-port request
1596 * @wka_port: pointer to struct zfcp_fc_wka_port
1597 * Returns: 0 on success, error otherwise
1598 */
1599 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1600 {
1601 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1602 struct zfcp_fsf_req *req;
1603 int retval = -EIO;
1604
1605 spin_lock_irq(&qdio->req_q_lock);
1606 if (zfcp_qdio_sbal_get(qdio))
1607 goto out;
1608
1609 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1610 SBAL_SFLAGS0_TYPE_READ,
1611 qdio->adapter->pool.erp_req);
1612
1613 if (IS_ERR(req)) {
1614 retval = PTR_ERR(req);
1615 goto out;
1616 }
1617
1618 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1619 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1620
1621 req->handler = zfcp_fsf_open_wka_port_handler;
1622 hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
1623 req->data = wka_port;
1624
1625 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1626 retval = zfcp_fsf_req_send(req);
1627 if (retval)
1628 zfcp_fsf_req_free(req);
1629 out:
1630 spin_unlock_irq(&qdio->req_q_lock);
1631 return retval;
1632 }
1633
1634 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1635 {
1636 struct zfcp_fc_wka_port *wka_port = req->data;
1637
1638 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1639 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1640 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
1641 }
1642
1643 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1644 wake_up(&wka_port->completion_wq);
1645 }
1646
1647 /**
1648 * zfcp_fsf_close_wka_port - create and send close wka port request
1649 * @wka_port: WKA port to close
1650 * Returns: 0 on success, error otherwise
1651 */
1652 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1653 {
1654 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1655 struct zfcp_fsf_req *req;
1656 int retval = -EIO;
1657
1658 spin_lock_irq(&qdio->req_q_lock);
1659 if (zfcp_qdio_sbal_get(qdio))
1660 goto out;
1661
1662 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1663 SBAL_SFLAGS0_TYPE_READ,
1664 qdio->adapter->pool.erp_req);
1665
1666 if (IS_ERR(req)) {
1667 retval = PTR_ERR(req);
1668 goto out;
1669 }
1670
1671 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1672 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1673
1674 req->handler = zfcp_fsf_close_wka_port_handler;
1675 req->data = wka_port;
1676 req->qtcb->header.port_handle = wka_port->handle;
1677
1678 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1679 retval = zfcp_fsf_req_send(req);
1680 if (retval)
1681 zfcp_fsf_req_free(req);
1682 out:
1683 spin_unlock_irq(&qdio->req_q_lock);
1684 return retval;
1685 }
1686
1687 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1688 {
1689 struct zfcp_port *port = req->data;
1690 struct fsf_qtcb_header *header = &req->qtcb->header;
1691 struct scsi_device *sdev;
1692
1693 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1694 return;
1695
1696 switch (header->fsf_status) {
1697 case FSF_PORT_HANDLE_NOT_VALID:
1698 zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
1699 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1700 break;
1701 case FSF_ACCESS_DENIED:
1702 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1703 break;
1704 case FSF_PORT_BOXED:
1705 /* can't use generic zfcp_erp_modify_port_status because
1706 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1707 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1708 shost_for_each_device(sdev, port->adapter->scsi_host)
1709 if (sdev_to_zfcp(sdev)->port == port)
1710 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1711 &sdev_to_zfcp(sdev)->status);
1712 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
1713 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
1714 "fscpph2");
1715 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1716 break;
1717 case FSF_ADAPTER_STATUS_AVAILABLE:
1718 switch (header->fsf_status_qual.word[0]) {
1719 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1720 /* fall through */
1721 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1722 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1723 break;
1724 }
1725 break;
1726 case FSF_GOOD:
1727 /* can't use generic zfcp_erp_modify_port_status because
1728 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1729 */
1730 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1731 shost_for_each_device(sdev, port->adapter->scsi_host)
1732 if (sdev_to_zfcp(sdev)->port == port)
1733 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1734 &sdev_to_zfcp(sdev)->status);
1735 break;
1736 }
1737 }
1738
1739 /**
1740 * zfcp_fsf_close_physical_port - close physical port
1741 * @erp_action: pointer to struct zfcp_erp_action
1742 * Returns: 0 on success
1743 */
1744 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1745 {
1746 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1747 struct zfcp_fsf_req *req;
1748 int retval = -EIO;
1749
1750 spin_lock_irq(&qdio->req_q_lock);
1751 if (zfcp_qdio_sbal_get(qdio))
1752 goto out;
1753
1754 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1755 SBAL_SFLAGS0_TYPE_READ,
1756 qdio->adapter->pool.erp_req);
1757
1758 if (IS_ERR(req)) {
1759 retval = PTR_ERR(req);
1760 goto out;
1761 }
1762
1763 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1764 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1765
1766 req->data = erp_action->port;
1767 req->qtcb->header.port_handle = erp_action->port->handle;
1768 req->erp_action = erp_action;
1769 req->handler = zfcp_fsf_close_physical_port_handler;
1770 erp_action->fsf_req_id = req->req_id;
1771
1772 zfcp_fsf_start_erp_timer(req);
1773 retval = zfcp_fsf_req_send(req);
1774 if (retval) {
1775 zfcp_fsf_req_free(req);
1776 erp_action->fsf_req_id = 0;
1777 }
1778 out:
1779 spin_unlock_irq(&qdio->req_q_lock);
1780 return retval;
1781 }
1782
1783 static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1784 {
1785 struct zfcp_adapter *adapter = req->adapter;
1786 struct scsi_device *sdev = req->data;
1787 struct zfcp_scsi_dev *zfcp_sdev;
1788 struct fsf_qtcb_header *header = &req->qtcb->header;
1789 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1790
1791 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1792 return;
1793
1794 zfcp_sdev = sdev_to_zfcp(sdev);
1795
1796 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1797 ZFCP_STATUS_COMMON_ACCESS_BOXED |
1798 ZFCP_STATUS_LUN_SHARED |
1799 ZFCP_STATUS_LUN_READONLY,
1800 &zfcp_sdev->status);
1801
1802 switch (header->fsf_status) {
1803
1804 case FSF_PORT_HANDLE_NOT_VALID:
1805 zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
1806 /* fall through */
1807 case FSF_LUN_ALREADY_OPEN:
1808 break;
1809 case FSF_ACCESS_DENIED:
1810 zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
1811 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1812 break;
1813 case FSF_PORT_BOXED:
1814 zfcp_erp_set_port_status(zfcp_sdev->port,
1815 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1816 zfcp_erp_port_reopen(zfcp_sdev->port,
1817 ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
1818 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1819 break;
1820 case FSF_LUN_SHARING_VIOLATION:
1821 zfcp_cfdc_lun_shrng_vltn(sdev, &header->fsf_status_qual);
1822 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1823 break;
1824 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1825 dev_warn(&adapter->ccw_device->dev,
1826 "No handle is available for LUN "
1827 "0x%016Lx on port 0x%016Lx\n",
1828 (unsigned long long)zfcp_scsi_dev_lun(sdev),
1829 (unsigned long long)zfcp_sdev->port->wwpn);
1830 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
1831 /* fall through */
1832 case FSF_INVALID_COMMAND_OPTION:
1833 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1834 break;
1835 case FSF_ADAPTER_STATUS_AVAILABLE:
1836 switch (header->fsf_status_qual.word[0]) {
1837 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1838 zfcp_fc_test_link(zfcp_sdev->port);
1839 /* fall through */
1840 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1841 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1842 break;
1843 }
1844 break;
1845
1846 case FSF_GOOD:
1847 zfcp_sdev->lun_handle = header->lun_handle;
1848 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1849 zfcp_cfdc_open_lun_eval(sdev, bottom);
1850 break;
1851 }
1852 }
1853
1854 /**
1855 * zfcp_fsf_open_lun - open LUN
1856 * @erp_action: pointer to struct zfcp_erp_action
1857 * Returns: 0 on success, error otherwise
1858 */
1859 int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
1860 {
1861 struct zfcp_adapter *adapter = erp_action->adapter;
1862 struct zfcp_qdio *qdio = adapter->qdio;
1863 struct zfcp_fsf_req *req;
1864 int retval = -EIO;
1865
1866 spin_lock_irq(&qdio->req_q_lock);
1867 if (zfcp_qdio_sbal_get(qdio))
1868 goto out;
1869
1870 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
1871 SBAL_SFLAGS0_TYPE_READ,
1872 adapter->pool.erp_req);
1873
1874 if (IS_ERR(req)) {
1875 retval = PTR_ERR(req);
1876 goto out;
1877 }
1878
1879 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1880 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1881
1882 req->qtcb->header.port_handle = erp_action->port->handle;
1883 req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
1884 req->handler = zfcp_fsf_open_lun_handler;
1885 req->data = erp_action->sdev;
1886 req->erp_action = erp_action;
1887 erp_action->fsf_req_id = req->req_id;
1888
1889 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1890 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1891
1892 zfcp_fsf_start_erp_timer(req);
1893 retval = zfcp_fsf_req_send(req);
1894 if (retval) {
1895 zfcp_fsf_req_free(req);
1896 erp_action->fsf_req_id = 0;
1897 }
1898 out:
1899 spin_unlock_irq(&qdio->req_q_lock);
1900 return retval;
1901 }
1902
1903 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
1904 {
1905 struct scsi_device *sdev = req->data;
1906 struct zfcp_scsi_dev *zfcp_sdev;
1907
1908 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1909 return;
1910
1911 zfcp_sdev = sdev_to_zfcp(sdev);
1912
1913 switch (req->qtcb->header.fsf_status) {
1914 case FSF_PORT_HANDLE_NOT_VALID:
1915 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
1916 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1917 break;
1918 case FSF_LUN_HANDLE_NOT_VALID:
1919 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
1920 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1921 break;
1922 case FSF_PORT_BOXED:
1923 zfcp_erp_set_port_status(zfcp_sdev->port,
1924 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1925 zfcp_erp_port_reopen(zfcp_sdev->port,
1926 ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
1927 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1928 break;
1929 case FSF_ADAPTER_STATUS_AVAILABLE:
1930 switch (req->qtcb->header.fsf_status_qual.word[0]) {
1931 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1932 zfcp_fc_test_link(zfcp_sdev->port);
1933 /* fall through */
1934 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1935 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1936 break;
1937 }
1938 break;
1939 case FSF_GOOD:
1940 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1941 break;
1942 }
1943 }
1944
1945 /**
1946  * zfcp_fsf_close_lun - close LUN
1947 * @erp_action: pointer to erp_action triggering the "close LUN"
1948 * Returns: 0 on success, error otherwise
1949 */
1950 int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
1951 {
1952 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1953 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
1954 struct zfcp_fsf_req *req;
1955 int retval = -EIO;
1956
1957 spin_lock_irq(&qdio->req_q_lock);
1958 if (zfcp_qdio_sbal_get(qdio))
1959 goto out;
1960
1961 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
1962 SBAL_SFLAGS0_TYPE_READ,
1963 qdio->adapter->pool.erp_req);
1964
1965 if (IS_ERR(req)) {
1966 retval = PTR_ERR(req);
1967 goto out;
1968 }
1969
1970 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1971 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1972
1973 req->qtcb->header.port_handle = erp_action->port->handle;
1974 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
1975 req->handler = zfcp_fsf_close_lun_handler;
1976 req->data = erp_action->sdev;
1977 req->erp_action = erp_action;
1978 erp_action->fsf_req_id = req->req_id;
1979
1980 zfcp_fsf_start_erp_timer(req);
1981 retval = zfcp_fsf_req_send(req);
1982 if (retval) {
1983 zfcp_fsf_req_free(req);
1984 erp_action->fsf_req_id = 0;
1985 }
1986 out:
1987 spin_unlock_irq(&qdio->req_q_lock);
1988 return retval;
1989 }
1990
1991 static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
1992 {
1993 lat_rec->sum += lat;
1994 lat_rec->min = min(lat_rec->min, lat);
1995 lat_rec->max = max(lat_rec->max, lat);
1996 }
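/*
 * Latency bookkeeping sketch: each struct fsf_latency_record keeps a
 * running sum plus min and max, and struct latency_cont (see the counter
 * increment in zfcp_fsf_req_trace() below) counts the requests, so a
 * consumer can derive a mean as sum / counter. For example, a record
 * with sum == 300 over counter == 100 requests averages 3 latency units
 * per request.
 */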
1997
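/*
 * Per-request latency and block trace accounting: channel and fabric
 * latencies reported by the adapter are scaled by the adapter's
 * timer_ticks resolution, accumulated in the per-device read, write or
 * cmd buckets, and a zfcp_blk_drv_data record is attached to the
 * command's block trace via blk_add_driver_data().
 */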
1998 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
1999 {
2000 struct fsf_qual_latency_info *lat_in;
2001 struct latency_cont *lat = NULL;
2002 struct zfcp_scsi_dev *zfcp_sdev;
2003 struct zfcp_blk_drv_data blktrc;
2004 int ticks = req->adapter->timer_ticks;
2005
2006 lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
2007
2008 blktrc.flags = 0;
2009 blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
2010 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2011 blktrc.flags |= ZFCP_BLK_REQ_ERROR;
2012 blktrc.inb_usage = 0;
2013 blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
2014
2015 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
2016 !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2017 zfcp_sdev = sdev_to_zfcp(scsi->device);
2018 blktrc.flags |= ZFCP_BLK_LAT_VALID;
2019 blktrc.channel_lat = lat_in->channel_lat * ticks;
2020 blktrc.fabric_lat = lat_in->fabric_lat * ticks;
2021
2022 switch (req->qtcb->bottom.io.data_direction) {
2023 case FSF_DATADIR_DIF_READ_STRIP:
2024 case FSF_DATADIR_DIF_READ_CONVERT:
2025 case FSF_DATADIR_READ:
2026 lat = &zfcp_sdev->latencies.read;
2027 break;
2028 case FSF_DATADIR_DIF_WRITE_INSERT:
2029 case FSF_DATADIR_DIF_WRITE_CONVERT:
2030 case FSF_DATADIR_WRITE:
2031 lat = &zfcp_sdev->latencies.write;
2032 break;
2033 case FSF_DATADIR_CMND:
2034 lat = &zfcp_sdev->latencies.cmd;
2035 break;
2036 }
2037
2038 if (lat) {
2039 spin_lock(&zfcp_sdev->latencies.lock);
2040 zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
2041 zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
2042 lat->counter++;
2043 spin_unlock(&zfcp_sdev->latencies.lock);
2044 }
2045 }
2046
2047 blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
2048 sizeof(blktrc));
2049 }
2050
2051 static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
2052 {
2053 struct scsi_cmnd *scmnd = req->data;
2054 struct scsi_device *sdev = scmnd->device;
2055 struct zfcp_scsi_dev *zfcp_sdev;
2056 struct fsf_qtcb_header *header = &req->qtcb->header;
2057
2058 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2059 return;
2060
2061 zfcp_sdev = sdev_to_zfcp(sdev);
2062
2063 switch (header->fsf_status) {
2064 case FSF_HANDLE_MISMATCH:
2065 case FSF_PORT_HANDLE_NOT_VALID:
2066 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1");
2067 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2068 break;
2069 case FSF_FCPLUN_NOT_VALID:
2070 case FSF_LUN_HANDLE_NOT_VALID:
2071 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
2072 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2073 break;
2074 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2075 zfcp_fsf_class_not_supp(req);
2076 break;
2077 case FSF_ACCESS_DENIED:
2078 zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
2079 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2080 break;
2081 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2082 dev_err(&req->adapter->ccw_device->dev,
2083 "Incorrect direction %d, LUN 0x%016Lx on port "
2084 "0x%016Lx closed\n",
2085 req->qtcb->bottom.io.data_direction,
2086 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2087 (unsigned long long)zfcp_sdev->port->wwpn);
2088 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2089 "fssfch3");
2090 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2091 break;
2092 case FSF_CMND_LENGTH_NOT_VALID:
2093 dev_err(&req->adapter->ccw_device->dev,
2094 "Incorrect CDB length %d, LUN 0x%016Lx on "
2095 "port 0x%016Lx closed\n",
2096 req->qtcb->bottom.io.fcp_cmnd_length,
2097 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2098 (unsigned long long)zfcp_sdev->port->wwpn);
2099 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2100 "fssfch4");
2101 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2102 break;
2103 case FSF_PORT_BOXED:
2104 zfcp_erp_set_port_status(zfcp_sdev->port,
2105 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2106 zfcp_erp_port_reopen(zfcp_sdev->port,
2107 ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
2108 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2109 break;
2110 case FSF_LUN_BOXED:
2111 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
2112 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
2113 "fssfch6");
2114 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2115 break;
2116 case FSF_ADAPTER_STATUS_AVAILABLE:
2117 if (header->fsf_status_qual.word[0] ==
2118 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2119 zfcp_fc_test_link(zfcp_sdev->port);
2120 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2121 break;
2122 }
2123 }
2124
2125 static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
2126 {
2127 struct scsi_cmnd *scpnt;
2128 struct fcp_resp_with_ext *fcp_rsp;
2129 unsigned long flags;
2130
2131 read_lock_irqsave(&req->adapter->abort_lock, flags);
2132
2133 scpnt = req->data;
2134 if (unlikely(!scpnt)) {
2135 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2136 return;
2137 }
2138
2139 zfcp_fsf_fcp_handler_common(req);
2140
2141 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2142 set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
2143 goto skip_fsfstatus;
2144 }
2145
2146 switch (req->qtcb->header.fsf_status) {
2147 case FSF_INCONSISTENT_PROT_DATA:
2148 case FSF_INVALID_PROT_PARM:
2149 set_host_byte(scpnt, DID_ERROR);
2150 goto skip_fsfstatus;
2151 case FSF_BLOCK_GUARD_CHECK_FAILURE:
2152 zfcp_scsi_dif_sense_error(scpnt, 0x1);
2153 goto skip_fsfstatus;
2154 case FSF_APP_TAG_CHECK_FAILURE:
2155 zfcp_scsi_dif_sense_error(scpnt, 0x2);
2156 goto skip_fsfstatus;
2157 case FSF_REF_TAG_CHECK_FAILURE:
2158 zfcp_scsi_dif_sense_error(scpnt, 0x3);
2159 goto skip_fsfstatus;
2160 }
2161 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2162 zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
2163
2164 skip_fsfstatus:
2165 zfcp_fsf_req_trace(req, scpnt);
2166 zfcp_dbf_scsi_result(scpnt, req);
2167
2168 scpnt->host_scribble = NULL;
2169 (scpnt->scsi_done) (scpnt);
2170 /*
2171 * We must hold this lock until scsi_done has been called.
2172  * Otherwise we may call scsi_done after the abort for this
2173  * command has already completed.
2174 * Note: scsi_done must not block!
2175 */
2176 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2177 }
2178
2179 static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2180 {
2181 switch (scsi_get_prot_op(scsi_cmnd)) {
2182 case SCSI_PROT_NORMAL:
2183 switch (scsi_cmnd->sc_data_direction) {
2184 case DMA_NONE:
2185 *data_dir = FSF_DATADIR_CMND;
2186 break;
2187 case DMA_FROM_DEVICE:
2188 *data_dir = FSF_DATADIR_READ;
2189 break;
2190 case DMA_TO_DEVICE:
2191 *data_dir = FSF_DATADIR_WRITE;
2192 break;
2193 case DMA_BIDIRECTIONAL:
2194 return -EINVAL;
2195 }
2196 break;
2197
2198 case SCSI_PROT_READ_STRIP:
2199 *data_dir = FSF_DATADIR_DIF_READ_STRIP;
2200 break;
2201 case SCSI_PROT_WRITE_INSERT:
2202 *data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
2203 break;
2204 case SCSI_PROT_READ_PASS:
2205 *data_dir = FSF_DATADIR_DIF_READ_CONVERT;
2206 break;
2207 case SCSI_PROT_WRITE_PASS:
2208 *data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
2209 break;
2210 default:
2211 return -EINVAL;
2212 }
2213
2214 return 0;
2215 }
2216
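/*
 * zfcp_fsf_set_data_dir() maps the SCSI protection operation and DMA
 * direction of a command onto the FSF data direction codes used in the
 * QTCB I/O bottom; bidirectional transfers and unknown protection
 * operations are rejected with -EINVAL. Illustrative use, roughly as in
 * zfcp_fsf_fcp_cmnd() below:
 *
 *	u32 data_dir;
 *
 *	if (zfcp_fsf_set_data_dir(scsi_cmnd, &data_dir))
 *		goto failed;
 */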
2217 /**
2218 * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
2219 * @scsi_cmnd: scsi command to be sent
2220 */
2221 int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2222 {
2223 struct zfcp_fsf_req *req;
2224 struct fcp_cmnd *fcp_cmnd;
2225 u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
2226 int retval = -EIO;
2227 struct scsi_device *sdev = scsi_cmnd->device;
2228 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2229 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
2230 struct zfcp_qdio *qdio = adapter->qdio;
2231 struct fsf_qtcb_bottom_io *io;
2232 unsigned long flags;
2233
2234 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2235 ZFCP_STATUS_COMMON_UNBLOCKED)))
2236 return -EBUSY;
2237
2238 spin_lock_irqsave(&qdio->req_q_lock, flags);
2239 if (atomic_read(&qdio->req_q_free) <= 0) {
2240 atomic_inc(&qdio->req_q_full);
2241 goto out;
2242 }
2243
2244 if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
2245 sbtype = SBAL_SFLAGS0_TYPE_WRITE;
2246
2247 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2248 sbtype, adapter->pool.scsi_req);
2249
2250 if (IS_ERR(req)) {
2251 retval = PTR_ERR(req);
2252 goto out;
2253 }
2254
2255 scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2256
2257 io = &req->qtcb->bottom.io;
2258 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2259 req->data = scsi_cmnd;
2260 req->handler = zfcp_fsf_fcp_cmnd_handler;
2261 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2262 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2263 io->service_class = FSF_CLASS_3;
2264 io->fcp_cmnd_length = FCP_CMND_LEN;
2265
2266 if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
2267 io->data_block_length = scsi_cmnd->device->sector_size;
2268 io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
2269 }
2270
2271 if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
2272 goto failed_scsi_cmnd;
2273
2274 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2275 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
2276
2277 if (scsi_prot_sg_count(scsi_cmnd)) {
2278 zfcp_qdio_set_data_div(qdio, &req->qdio_req,
2279 scsi_prot_sg_count(scsi_cmnd));
2280 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2281 scsi_prot_sglist(scsi_cmnd));
2282 if (retval)
2283 goto failed_scsi_cmnd;
2284 io->prot_data_length = zfcp_qdio_real_bytes(
2285 scsi_prot_sglist(scsi_cmnd));
2286 }
2287
2288 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2289 scsi_sglist(scsi_cmnd));
2290 if (unlikely(retval))
2291 goto failed_scsi_cmnd;
2292
2293 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
2294 if (zfcp_adapter_multi_buffer_active(adapter))
2295 zfcp_qdio_set_scount(qdio, &req->qdio_req);
2296
2297 retval = zfcp_fsf_req_send(req);
2298 if (unlikely(retval))
2299 goto failed_scsi_cmnd;
2300
2301 goto out;
2302
2303 failed_scsi_cmnd:
2304 zfcp_fsf_req_free(req);
2305 scsi_cmnd->host_scribble = NULL;
2306 out:
2307 spin_unlock_irqrestore(&qdio->req_q_lock, flags);
2308 return retval;
2309 }
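/*
 * Caller sketch for zfcp_fsf_fcp_cmnd(), assuming a SCSI queuecommand
 * style context (illustration only; the real caller is the SCSI
 * queuecommand path in zfcp_scsi.c): -EBUSY indicates a blocked LUN,
 * any other error covers request queue exhaustion or a failed send.
 *
 *	ret = zfcp_fsf_fcp_cmnd(scsi_cmnd);
 *	if (ret == -EBUSY)
 *		return SCSI_MLQUEUE_DEVICE_BUSY;
 *	if (ret < 0)
 *		return SCSI_MLQUEUE_HOST_BUSY;
 *	return 0;
 */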
2310
2311 static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
2312 {
2313 struct fcp_resp_with_ext *fcp_rsp;
2314 struct fcp_resp_rsp_info *rsp_info;
2315
2316 zfcp_fsf_fcp_handler_common(req);
2317
2318 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2319 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2320
2321 if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2322 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2323 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2324 }
2325
2326 /**
2327 * zfcp_fsf_fcp_task_mgmt - send SCSI task management command
2328 * @scmnd: SCSI command to send the task management command for
2329 * @tm_flags: unsigned byte for task management flags
2330  * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
2331 */
2332 struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
2333 u8 tm_flags)
2334 {
2335 struct zfcp_fsf_req *req = NULL;
2336 struct fcp_cmnd *fcp_cmnd;
2337 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
2338 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
2339
2340 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2341 ZFCP_STATUS_COMMON_UNBLOCKED)))
2342 return NULL;
2343
2344 spin_lock_irq(&qdio->req_q_lock);
2345 if (zfcp_qdio_sbal_get(qdio))
2346 goto out;
2347
2348 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2349 SBAL_SFLAGS0_TYPE_WRITE,
2350 qdio->adapter->pool.scsi_req);
2351
2352 if (IS_ERR(req)) {
2353 req = NULL;
2354 goto out;
2355 }
2356
2357 req->data = scmnd;
2358 req->handler = zfcp_fsf_fcp_task_mgmt_handler;
2359 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2360 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2361 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2362 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2363 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2364
2365 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2366
2367 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2368 zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
2369
2370 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2371 if (!zfcp_fsf_req_send(req))
2372 goto out;
2373
2374 zfcp_fsf_req_free(req);
2375 req = NULL;
2376 out:
2377 spin_unlock_irq(&qdio->req_q_lock);
2378 return req;
2379 }
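/*
 * Caller sketch for zfcp_fsf_fcp_task_mgmt(), assuming a SCSI error
 * handler context (illustration only): the returned request must be
 * waited for and freed by the caller; TMF failure is reported through
 * ZFCP_STATUS_FSFREQ_TMFUNCFAILED, set by the handler above.
 *
 *	req = zfcp_fsf_fcp_task_mgmt(scmnd, FCP_TMF_LUN_RESET);
 *	if (!req)
 *		return FAILED;
 *	wait_for_completion(&req->completion);
 *	ret = (req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) ?
 *		FAILED : SUCCESS;
 *	zfcp_fsf_req_free(req);
 *	return ret;
 */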
2380
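/*
 * CFDC requests are evaluated in the submitting context:
 * zfcp_fsf_control_file() below waits for req->completion and hands the
 * completed request back to its caller, so there is nothing left to do
 * in the completion handler itself.
 */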
2381 static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
2382 {
2383 }
2384
2385 /**
2386 * zfcp_fsf_control_file - control file upload/download
2387 * @adapter: pointer to struct zfcp_adapter
2388 * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc
2389  * Returns: on success pointer to struct zfcp_fsf_req, error pointer otherwise
2390 */
2391 struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2392 struct zfcp_fsf_cfdc *fsf_cfdc)
2393 {
2394 struct zfcp_qdio *qdio = adapter->qdio;
2395 struct zfcp_fsf_req *req = NULL;
2396 struct fsf_qtcb_bottom_support *bottom;
2397 int retval = -EIO;
2398 u8 direction;
2399
2400 if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
2401 return ERR_PTR(-EOPNOTSUPP);
2402
2403 switch (fsf_cfdc->command) {
2404 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
2405 direction = SBAL_SFLAGS0_TYPE_WRITE;
2406 break;
2407 case FSF_QTCB_UPLOAD_CONTROL_FILE:
2408 direction = SBAL_SFLAGS0_TYPE_READ;
2409 break;
2410 default:
2411 return ERR_PTR(-EINVAL);
2412 }
2413
2414 spin_lock_irq(&qdio->req_q_lock);
2415 if (zfcp_qdio_sbal_get(qdio))
2416 goto out;
2417
2418 req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, direction, NULL);
2419 if (IS_ERR(req)) {
2420 retval = -EPERM;
2421 goto out;
2422 }
2423
2424 req->handler = zfcp_fsf_control_file_handler;
2425
2426 bottom = &req->qtcb->bottom.support;
2427 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
2428 bottom->option = fsf_cfdc->option;
2429
2430 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg);
2431
2432 if (retval ||
2433 (zfcp_qdio_real_bytes(fsf_cfdc->sg) != ZFCP_CFDC_MAX_SIZE)) {
2434 zfcp_fsf_req_free(req);
2435 retval = -EIO;
2436 goto out;
2437 }
2438 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2439 if (zfcp_adapter_multi_buffer_active(adapter))
2440 zfcp_qdio_set_scount(qdio, &req->qdio_req);
2441
2442 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2443 retval = zfcp_fsf_req_send(req);
2444 out:
2445 spin_unlock_irq(&qdio->req_q_lock);
2446
2447 if (!retval) {
2448 wait_for_completion(&req->completion);
2449 return req;
2450 }
2451 return ERR_PTR(retval);
2452 }
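/*
 * Note for callers of zfcp_fsf_control_file(): the return value must be
 * checked with IS_ERR(). On success the already completed request is
 * returned; since ZFCP_STATUS_FSFREQ_CLEANUP is not set for CFDC
 * requests, the caller is expected to evaluate the QTCB and release the
 * request with zfcp_fsf_req_free().
 */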
2453
2454 /**
2455 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
2456  * @qdio: pointer to struct zfcp_qdio
2457 * @sbal_idx: response queue index of SBAL to be processed
2458 */
2459 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2460 {
2461 struct zfcp_adapter *adapter = qdio->adapter;
2462 struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
2463 struct qdio_buffer_element *sbale;
2464 struct zfcp_fsf_req *fsf_req;
2465 unsigned long req_id;
2466 int idx;
2467
2468 for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2469
2470 sbale = &sbal->element[idx];
2471 req_id = (unsigned long) sbale->addr;
2472 fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
2473
2474 if (!fsf_req) {
2475 /*
2476 			 * An unknown request ID indicates potential memory
2477 			 * corruption, so the machine must be stopped immediately.
2478 */
2479 zfcp_qdio_siosl(adapter);
2480 panic("error: unknown req_id (%lx) on adapter %s.\n",
2481 req_id, dev_name(&adapter->ccw_device->dev));
2482 }
2483
2484 fsf_req->qdio_req.sbal_response = sbal_idx;
2485 zfcp_fsf_req_complete(fsf_req);
2486
2487 if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
2488 break;
2489 }
2490 }
2491
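/**
 * zfcp_fsf_get_req - look up the request behind the first SBAL element
 * @qdio: pointer to struct zfcp_qdio
 * @sbal: response queue SBAL whose first element carries the request ID
 * Returns: matching struct zfcp_fsf_req from the adapter's request list,
 * or NULL if no request with that ID is pending
 */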
2492 struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *qdio,
2493 struct qdio_buffer *sbal)
2494 {
2495 struct qdio_buffer_element *sbale = &sbal->element[0];
2496 u64 req_id = (unsigned long) sbale->addr;
2497
2498 return zfcp_reqlist_find(qdio->adapter->req_list, req_id);
2499 }
2500