// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 */

#include <linux/delay.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/syscalls.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "sislite.h"
#include "common.h"
#include "vlun.h"
#include "superpipe.h"

struct cxlflash_global global;

/**
 * marshal_rele_to_resize() - translate release to resize structure
 * @release: Source structure from which to translate/copy.
 * @resize: Destination structure for the translate/copy.
 */
static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
                                   struct dk_cxlflash_resize *resize)
{
        resize->hdr = release->hdr;
        resize->context_id = release->context_id;
        resize->rsrc_handle = release->rsrc_handle;
}

/**
 * marshal_det_to_rele() - translate detach to release structure
 * @detach: Source structure from which to translate/copy.
 * @release: Destination structure for the translate/copy.
 */
static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
                                struct dk_cxlflash_release *release)
{
        release->hdr = detach->hdr;
        release->context_id = detach->context_id;
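        /*
         * Note: rsrc_handle is intentionally not copied here; the caller
         * fills it in per-entry when releasing each resource (see
         * _cxlflash_disk_detach()).
         */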
}

/**
 * marshal_udir_to_rele() - translate udirect to release structure
 * @udirect: Source structure from which to translate/copy.
 * @release: Destination structure for the translate/copy.
 */
static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect,
                                 struct dk_cxlflash_release *release)
{
        release->hdr = udirect->hdr;
        release->context_id = udirect->context_id;
        release->rsrc_handle = udirect->rsrc_handle;
}

/**
 * cxlflash_free_errpage() - frees resources associated with global error page
 */
void cxlflash_free_errpage(void)
{
        mutex_lock(&global.mutex);
        if (global.err_page) {
                __free_page(global.err_page);
                global.err_page = NULL;
        }
        mutex_unlock(&global.mutex);
}

/**
 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
 * @cfg: Internal structure associated with the host.
 *
 * When the host needs to go down, all users must be quiesced and their
 * memory freed. This is accomplished by putting the contexts in error
 * state which will notify the user and let them 'drive' the tear down.
 * Meanwhile, this routine camps until all user contexts have been removed.
 *
 * Note that the main loop in this routine will always execute at least once
 * to flush the reset_waitq.
 */
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
{
        struct device *dev = &cfg->dev->dev;
        int i, found = true;

        cxlflash_mark_contexts_error(cfg);

        while (true) {
                for (i = 0; i < MAX_CONTEXT; i++)
                        if (cfg->ctx_tbl[i]) {
                                found = true;
                                break;
                        }

                if (!found && list_empty(&cfg->ctx_err_recovery))
                        return;

                dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
                        __func__);
                wake_up_all(&cfg->reset_waitq);
                ssleep(1);
                found = false;
        }
}

/**
 * find_error_context() - locates a context by cookie on the error recovery list
 * @cfg: Internal structure associated with the host.
 * @rctxid: Desired context by id.
 * @file: Desired context by file.
 *
 * Return: Found context on success, NULL on failure
 */
static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
                                           struct file *file)
{
        struct ctx_info *ctxi;

        list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
                if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
                        return ctxi;

        return NULL;
}

/**
 * get_context() - obtains a validated and locked context reference
 * @cfg: Internal structure associated with the host.
 * @rctxid: Desired context (raw, un-decoded format).
 * @arg: LUN information or file associated with request.
 * @ctx_ctrl: Control information to 'steer' desired lookup.
 *
 * NOTE: despite the name pid, in Linux, current->pid actually refers
 * to the lightweight process id (tid) and can change if the process is
 * multithreaded. The tgid remains constant for the process and only changes
 * when the process forks. For all intents and purposes, think of tgid
 * as a pid in the traditional sense.
 *
 * Return: Validated context on success, NULL on failure
 */
struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
                             void *arg, enum ctx_ctrl ctx_ctrl)
{
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        struct lun_access *lun_access = NULL;
        struct file *file = NULL;
        struct llun_info *lli = arg;
        u64 ctxid = DECODE_CTXID(rctxid);
        int rc;
        pid_t pid = task_tgid_nr(current), ctxpid = 0;

        if (ctx_ctrl & CTX_CTRL_FILE) {
                lli = NULL;
                file = (struct file *)arg;
        }

        if (ctx_ctrl & CTX_CTRL_CLONE)
                pid = task_ppid_nr(current);

        if (likely(ctxid < MAX_CONTEXT)) {
                while (true) {
                        mutex_lock(&cfg->ctx_tbl_list_mutex);
                        ctxi = cfg->ctx_tbl[ctxid];
                        if (ctxi)
                                if ((file && (ctxi->file != file)) ||
                                    (!file && (ctxi->ctxid != rctxid)))
                                        ctxi = NULL;

                        if ((ctx_ctrl & CTX_CTRL_ERR) ||
                            (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
                                ctxi = find_error_context(cfg, rctxid, file);
                        if (!ctxi) {
                                mutex_unlock(&cfg->ctx_tbl_list_mutex);
                                goto out;
                        }

                        /*
                         * Need to acquire ownership of the context while still
                         * under the table/list lock to serialize with a remove
                         * thread. Use the 'try' to avoid stalling the
                         * table/list lock for a single context.
                         *
                         * Note that the lock order is:
                         *
                         *      cfg->ctx_tbl_list_mutex -> ctxi->mutex
                         *
                         * Therefore release ctx_tbl_list_mutex before retrying.
                         */
                        rc = mutex_trylock(&ctxi->mutex);
                        mutex_unlock(&cfg->ctx_tbl_list_mutex);
                        if (rc)
                                break; /* got the context's lock! */
                }

                if (ctxi->unavail)
                        goto denied;

                ctxpid = ctxi->pid;
                if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
                        if (pid != ctxpid)
                                goto denied;

                if (lli) {
                        list_for_each_entry(lun_access, &ctxi->luns, list)
                                if (lun_access->lli == lli)
                                        goto out;
                        goto denied;
                }
        }

out:
        dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u "
                "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
                ctx_ctrl);

        return ctxi;

denied:
        mutex_unlock(&ctxi->mutex);
        ctxi = NULL;
        goto out;
}

/**
 * put_context() - release a context that was retrieved from get_context()
 * @ctxi: Context to release.
 *
 * For now, releasing the context equates to unlocking its mutex.
 */
void put_context(struct ctx_info *ctxi)
{
        mutex_unlock(&ctxi->mutex);
}
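
/*
 * A minimal usage sketch of the get_context()/put_context() pairing
 * (it mirrors the pattern used by cxlflash_disk_verify() below):
 *
 *      ctxi = get_context(cfg, rctxid, lli, 0);
 *      if (unlikely(!ctxi))
 *              return -EINVAL;
 *      ... operate on the context while holding its mutex ...
 *      put_context(ctxi);
 */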

/**
 * afu_attach() - attach a context to the AFU
 * @cfg: Internal structure associated with the host.
 * @ctxi: Context to attach.
 *
 * Upon setting the context capabilities, they must be confirmed with
 * a read back operation as the context might have been closed since
 * the mailbox was unlocked. When this occurs, registration is failed.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
        struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
        struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
        int rc = 0;
        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
        u64 val;
        int i;

        /* Unlock cap and restrict user to read/write cmds in translated mode */
        readq_be(&ctrl_map->mbox_r);
        val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
        writeq_be(val, &ctrl_map->ctx_cap);
        val = readq_be(&ctrl_map->ctx_cap);
        if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
                dev_err(dev, "%s: ctx may be closed val=%016llx\n",
                        __func__, val);
                rc = -EAGAIN;
                goto out;
        }

        if (afu_is_ocxl_lisn(afu)) {
                /* Set up the LISN effective address for each interrupt */
                for (i = 0; i < ctxi->irqs; i++) {
                        val = cfg->ops->get_irq_objhndl(ctxi->ctx, i);
                        writeq_be(val, &ctrl_map->lisn_ea[i]);
                }

                /* Use primary HWQ PASID as identifier for all interrupts */
                val = hwq->ctx_hndl;
                writeq_be(SISL_LISN_PASID(val, val), &ctrl_map->lisn_pasid[0]);
                writeq_be(SISL_LISN_PASID(0UL, val), &ctrl_map->lisn_pasid[1]);
        }

        /* Set up MMIO registers pointing to the RHT */
        writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
        val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));
        writeq_be(val, &ctrl_map->rht_cnt_id);
out:
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * read_cap16() - issues a SCSI READ_CAP16 command
 * @sdev: SCSI device associated with LUN.
 * @lli: LUN destined for capacity request.
 *
 * The READ_CAP16 can take quite a while to complete. Should an EEH occur while
 * in scsi_execute(), the EEH handler will attempt to recover. As part of the
 * recovery, the handler drains all currently running ioctls, waiting until they
 * have completed before proceeding with a reset. As this routine is used on the
 * ioctl path, this can create a condition where the EEH handler becomes stuck,
 * infinitely waiting for this ioctl thread. To avoid this behavior, temporarily
 * unmark this thread as an ioctl thread by releasing the ioctl read semaphore.
 * This will allow the EEH handler to proceed with a recovery while this thread
 * is still running. Once the scsi_execute() returns, reacquire the ioctl read
 * semaphore and check the adapter state in case it changed while inside of
 * scsi_execute(). The state check will wait if the adapter is still being
 * recovered or return a failure if the recovery failed. In the event that the
 * adapter reset failed, simply return the failure as the ioctl would be unable
 * to continue.
 *
 * Note that the above puts a requirement on this routine to only be called on
 * an ioctl thread.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
{
        struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct glun_info *gli = lli->parent;
        struct scsi_sense_hdr sshdr;
        u8 *cmd_buf = NULL;
        u8 *scsi_cmd = NULL;
        int rc = 0;
        int result = 0;
        int retry_cnt = 0;
        u32 to = CMD_TIMEOUT * HZ;

retry:
        cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
        scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
        if (unlikely(!cmd_buf || !scsi_cmd)) {
                rc = -ENOMEM;
                goto out;
        }

        scsi_cmd[0] = SERVICE_ACTION_IN_16;     /* read cap(16) */
        scsi_cmd[1] = SAI_READ_CAPACITY_16;     /* service action */
        put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);

        dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
                retry_cnt ? "re" : "", scsi_cmd[0]);

        /* Drop the ioctl read semaphore across lengthy call */
        up_read(&cfg->ioctl_rwsem);
        result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
                              CMD_BUFSIZE, NULL, &sshdr, to, CMD_RETRIES,
                              0, 0, NULL);
        down_read(&cfg->ioctl_rwsem);
        rc = check_state(cfg);
        if (rc) {
                dev_err(dev, "%s: Failed state result=%08x\n",
                        __func__, result);
                rc = -ENODEV;
                goto out;
        }

        if (result > 0 && scsi_sense_valid(&sshdr)) {
                if (result & SAM_STAT_CHECK_CONDITION) {
                        switch (sshdr.sense_key) {
                        case NO_SENSE:
                        case RECOVERED_ERROR:
                        case NOT_READY:
                                result &= ~SAM_STAT_CHECK_CONDITION;
                                break;
                        case UNIT_ATTENTION:
                                switch (sshdr.asc) {
                                case 0x29: /* Power on Reset or Device Reset */
                                        fallthrough;
                                case 0x2A: /* Device capacity changed */
                                case 0x3F: /* Report LUNs changed */
                                        /* Retry the command once more */
                                        if (retry_cnt++ < 1) {
                                                kfree(cmd_buf);
                                                kfree(scsi_cmd);
                                                goto retry;
                                        }
                                }
                                break;
                        default:
                                break;
                        }
                }
        }

        if (result) {
                dev_err(dev, "%s: command failed, result=%08x\n",
                        __func__, result);
                rc = -EIO;
                goto out;
        }

        /*
         * Read cap was successful, grab values from the buffer;
         * note that we don't need to worry about unaligned access
         * as the buffer is allocated on an aligned boundary.
         */
        mutex_lock(&gli->mutex);
        gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
        gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
        mutex_unlock(&gli->mutex);

out:
        kfree(cmd_buf);
        kfree(scsi_cmd);

        dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
                __func__, gli->max_lba, gli->blk_len, rc);
        return rc;
}

/**
 * get_rhte() - obtains validated resource handle table entry reference
 * @ctxi: Context owning the resource handle.
 * @rhndl: Resource handle associated with entry.
 * @lli: LUN associated with request.
 *
 * Return: Validated RHTE on success, NULL on failure
 */
struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
                                struct llun_info *lli)
{
        struct cxlflash_cfg *cfg = ctxi->cfg;
        struct device *dev = &cfg->dev->dev;
        struct sisl_rht_entry *rhte = NULL;

        if (unlikely(!ctxi->rht_start)) {
                dev_dbg(dev, "%s: Context does not have allocated RHT\n",
                        __func__);
                goto out;
        }

        if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
                dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
                        __func__, rhndl);
                goto out;
        }

        if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
                dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n",
                        __func__, rhndl);
                goto out;
        }

        rhte = &ctxi->rht_start[rhndl];
        if (unlikely(rhte->nmask == 0)) {
                dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n",
                        __func__, rhndl);
                rhte = NULL;
                goto out;
        }

out:
        return rhte;
}

/**
 * rhte_checkout() - obtains free/empty resource handle table entry
 * @ctxi: Context owning the resource handle.
 * @lli: LUN associated with request.
 *
 * Return: Free RHTE on success, NULL on failure
 */
struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
                                     struct llun_info *lli)
{
        struct cxlflash_cfg *cfg = ctxi->cfg;
        struct device *dev = &cfg->dev->dev;
        struct sisl_rht_entry *rhte = NULL;
        int i;

        /* Find a free RHT entry */
        for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
                if (ctxi->rht_start[i].nmask == 0) {
                        rhte = &ctxi->rht_start[i];
                        ctxi->rht_out++;
                        break;
                }

        if (likely(rhte))
                ctxi->rht_lun[i] = lli;

        dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i);
        return rhte;
}

/**
 * rhte_checkin() - releases a resource handle table entry
 * @ctxi: Context owning the resource handle.
 * @rhte: RHTE to release.
 */
void rhte_checkin(struct ctx_info *ctxi,
                  struct sisl_rht_entry *rhte)
{
        u32 rsrc_handle = rhte - ctxi->rht_start;

        rhte->nmask = 0;
        rhte->fp = 0;
        ctxi->rht_out--;
        ctxi->rht_lun[rsrc_handle] = NULL;
        ctxi->rht_needs_ws[rsrc_handle] = false;
}
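
/*
 * A minimal checkout/checkin sketch, assuming a context that is already
 * validated and locked (the errno is illustrative; the release path in
 * _cxlflash_disk_release() below shows the checkin half in context):
 *
 *      rhte = rhte_checkout(ctxi, lli);
 *      if (unlikely(!rhte))
 *              return -EMFILE;
 *      ... populate the entry (e.g. via rht_format1()) and use it ...
 *      rhte_checkin(ctxi, rhte);
 */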

/**
 * rht_format1() - populates a RHTE for format 1
 * @rhte: RHTE to populate.
 * @lun_id: LUN ID of LUN associated with RHTE.
 * @perm: Desired permissions for RHTE.
 * @port_sel: Port selection mask
 */
static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
                        u32 port_sel)
{
        /*
         * Populate the Format 1 RHT entry for direct access (physical
         * LUN) using the synchronization sequence defined in the
         * SISLite specification.
         */
        struct sisl_rht_entry_f1 dummy = { 0 };
        struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

        memset(rhte_f1, 0, sizeof(*rhte_f1));
        rhte_f1->fp = SISL_RHT_FP(1U, 0);
        dma_wmb(); /* Make setting of format bit visible */

        rhte_f1->lun_id = lun_id;
        dma_wmb(); /* Make setting of LUN id visible */

        /*
         * Use a dummy RHT Format 1 entry to build the second dword
         * of the entry that must be populated in a single write when
         * enabled (valid bit set to TRUE).
         */
        dummy.valid = 0x80;
        dummy.fp = SISL_RHT_FP(1U, perm);
        dummy.port_sel = port_sel;
        rhte_f1->dw = dummy.dw;

        dma_wmb(); /* Make remaining RHT entry fields visible */
}

/**
 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
 * @gli: LUN to attach.
 * @mode: Desired mode of the LUN.
 * @locked: Mutex status on current thread.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
{
        int rc = 0;

        if (!locked)
                mutex_lock(&gli->mutex);

        if (gli->mode == MODE_NONE)
                gli->mode = mode;
        else if (gli->mode != mode) {
                pr_debug("%s: gli_mode=%d requested_mode=%d\n",
                         __func__, gli->mode, mode);
                rc = -EINVAL;
                goto out;
        }

        gli->users++;
        WARN_ON(gli->users <= 0);
out:
        pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
                 __func__, rc, gli->mode, gli->users);
        if (!locked)
                mutex_unlock(&gli->mutex);
        return rc;
}

/**
 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
 * @gli: LUN to detach.
 *
 * When resetting the mode, terminate block allocation resources as they
 * are no longer required (service is safe to call even when block allocation
 * resources were not present - such as when transitioning from physical mode).
 * These resources will be reallocated when needed (subsequent transition to
 * virtual mode).
 */
void cxlflash_lun_detach(struct glun_info *gli)
{
        mutex_lock(&gli->mutex);
        WARN_ON(gli->mode == MODE_NONE);
        if (--gli->users == 0) {
                gli->mode = MODE_NONE;
                cxlflash_ba_terminate(&gli->blka.ba_lun);
        }
        pr_debug("%s: gli->users=%u\n", __func__, gli->users);
        WARN_ON(gli->users < 0);
        mutex_unlock(&gli->mutex);
}

/**
 * _cxlflash_disk_release() - releases the specified resource entry
 * @sdev: SCSI device associated with LUN.
 * @ctxi: Context owning resources.
 * @release: Release ioctl data structure.
 *
 * For LUNs in virtual mode, the virtual LUN associated with the specified
 * resource handle is resized to 0 prior to releasing the RHTE. Note that the
 * AFU sync should _not_ be performed when the context is sitting on the error
 * recovery list. A context on the error recovery list is not known to the AFU
 * due to reset. When the context is recovered, it will be reattached and made
 * known again to the AFU.
 *
 * Return: 0 on success, -errno on failure
 */
int _cxlflash_disk_release(struct scsi_device *sdev,
                           struct ctx_info *ctxi,
                           struct dk_cxlflash_release *release)
{
        struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
        struct afu *afu = cfg->afu;
        bool put_ctx = false;

        struct dk_cxlflash_resize size;
        res_hndl_t rhndl = release->rsrc_handle;

        int rc = 0;
        int rcr = 0;
        u64 ctxid = DECODE_CTXID(release->context_id),
            rctxid = release->context_id;

        struct sisl_rht_entry *rhte;
        struct sisl_rht_entry_f1 *rhte_f1;

        dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n",
                __func__, ctxid, release->rsrc_handle, gli->mode, gli->users);

        if (!ctxi) {
                ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
                if (unlikely(!ctxi)) {
                        dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
                                __func__, ctxid);
                        rc = -EINVAL;
                        goto out;
                }

                put_ctx = true;
        }

        rhte = get_rhte(ctxi, rhndl, lli);
        if (unlikely(!rhte)) {
                dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
                        __func__, rhndl);
                rc = -EINVAL;
                goto out;
        }

        /*
         * Resize virtual LUNs to 0 by setting req_size to 0. This clears
         * the LXT_START and LXT_CNT fields in the RHT entry and properly
         * syncs with the AFU.
         *
         * Afterwards we clear the remaining fields.
         */
        switch (gli->mode) {
        case MODE_VIRTUAL:
                marshal_rele_to_resize(release, &size);
                size.req_size = 0;
                rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
                if (rc) {
                        dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
                        goto out;
                }

                break;
        case MODE_PHYSICAL:
                /*
                 * Clear the Format 1 RHT entry for direct access
                 * (physical LUN) using the synchronization sequence
                 * defined in the SISLite specification.
                 */
                rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

                rhte_f1->valid = 0;
                dma_wmb(); /* Make revocation of RHT entry visible */

                rhte_f1->lun_id = 0;
                dma_wmb(); /* Make clearing of LUN id visible */

                rhte_f1->dw = 0;
                dma_wmb(); /* Make RHT entry bottom-half clearing visible */

                if (!ctxi->err_recovery_active) {
                        rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
                        if (unlikely(rcr))
                                dev_dbg(dev, "%s: AFU sync failed rc=%d\n",
                                        __func__, rcr);
                }
                break;
        default:
                WARN(1, "Unsupported LUN mode!");
                goto out;
        }

        rhte_checkin(ctxi, rhte);
        cxlflash_lun_detach(gli);

out:
        if (put_ctx)
                put_context(ctxi);
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

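/**
 * cxlflash_disk_release() - releases the specified resource entry
 * @sdev: SCSI device associated with LUN.
 * @release: Release ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */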
int cxlflash_disk_release(struct scsi_device *sdev,
                          struct dk_cxlflash_release *release)
{
        return _cxlflash_disk_release(sdev, NULL, release);
}

/**
 * destroy_context() - releases a context
 * @cfg: Internal structure associated with the host.
 * @ctxi: Context to release.
 *
 * This routine is safe to be called with a non-initialized context.
 * Also note that the routine conditionally checks for the existence
 * of the context control map before clearing the RHT registers and
 * context capabilities because it is possible to destroy a context
 * while the context is in the error state (previous mapping was
 * removed [so there is no need to worry about clearing] and context
 * is waiting for a new mapping).
 */
static void destroy_context(struct cxlflash_cfg *cfg,
                            struct ctx_info *ctxi)
{
        struct afu *afu = cfg->afu;

        if (ctxi->initialized) {
                WARN_ON(!list_empty(&ctxi->luns));

                /* Clear RHT registers and drop all capabilities for context */
                if (afu->afu_map && ctxi->ctrl_map) {
                        writeq_be(0, &ctxi->ctrl_map->rht_start);
                        writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
                        writeq_be(0, &ctxi->ctrl_map->ctx_cap);
                }
        }

        /* Free memory associated with context */
        free_page((ulong)ctxi->rht_start);
        kfree(ctxi->rht_needs_ws);
        kfree(ctxi->rht_lun);
        kfree(ctxi);
}

/**
 * create_context() - allocates and initializes a context
 * @cfg: Internal structure associated with the host.
 *
 * Return: Allocated context on success, NULL on failure
 */
static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
{
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        struct llun_info **lli = NULL;
        u8 *ws = NULL;
        struct sisl_rht_entry *rhte;

        ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
        lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
        ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
        if (unlikely(!ctxi || !lli || !ws)) {
                dev_err(dev, "%s: Unable to allocate context\n", __func__);
                goto err;
        }

        rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
        if (unlikely(!rhte)) {
                dev_err(dev, "%s: Unable to allocate RHT\n", __func__);
                goto err;
        }

        ctxi->rht_lun = lli;
        ctxi->rht_needs_ws = ws;
        ctxi->rht_start = rhte;
out:
        return ctxi;

err:
        kfree(ws);
        kfree(lli);
        kfree(ctxi);
        ctxi = NULL;
        goto out;
}

/**
 * init_context() - initializes a previously allocated context
 * @ctxi: Previously allocated context
 * @cfg: Internal structure associated with the host.
 * @ctx: Previously obtained context cookie.
 * @ctxid: Previously obtained process element associated with CXL context.
 * @file: Previously obtained file associated with CXL context.
 * @perms: User-specified permissions.
 * @irqs: User-specified number of interrupts.
 */
static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
                         void *ctx, int ctxid, struct file *file, u32 perms,
                         u64 irqs)
{
        struct afu *afu = cfg->afu;

        ctxi->rht_perms = perms;
        ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
        ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
        ctxi->irqs = irqs;
        ctxi->pid = task_tgid_nr(current); /* tgid = pid */
        ctxi->ctx = ctx;
        ctxi->cfg = cfg;
        ctxi->file = file;
        ctxi->initialized = true;
        mutex_init(&ctxi->mutex);
        kref_init(&ctxi->kref);
        INIT_LIST_HEAD(&ctxi->luns);
        INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
}

/**
 * remove_context() - context kref release handler
 * @kref: Kernel reference associated with context to be removed.
 *
 * When a context no longer has any references it can safely be removed
 * from global access and destroyed. Note that it is assumed the thread
 * relinquishing access to the context holds its mutex.
 */
static void remove_context(struct kref *kref)
{
        struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
        struct cxlflash_cfg *cfg = ctxi->cfg;
        u64 ctxid = DECODE_CTXID(ctxi->ctxid);

        /* Remove context from table/error list */
        WARN_ON(!mutex_is_locked(&ctxi->mutex));
        ctxi->unavail = true;
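        /*
         * Drop the context mutex and retake it after the table/list mutex
         * to honor the documented lock order (cfg->ctx_tbl_list_mutex ->
         * ctxi->mutex). The unavail flag set above keeps other threads
         * from acquiring the context across this window.
         */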
        mutex_unlock(&ctxi->mutex);
        mutex_lock(&cfg->ctx_tbl_list_mutex);
        mutex_lock(&ctxi->mutex);

        if (!list_empty(&ctxi->list))
                list_del(&ctxi->list);
        cfg->ctx_tbl[ctxid] = NULL;
        mutex_unlock(&cfg->ctx_tbl_list_mutex);
        mutex_unlock(&ctxi->mutex);

        /* Context now completely uncoupled/unreachable */
        destroy_context(cfg, ctxi);
}

/**
 * _cxlflash_disk_detach() - detaches a LUN from a context
 * @sdev: SCSI device associated with LUN.
 * @ctxi: Context owning resources.
 * @detach: Detach ioctl data structure.
 *
 * As part of the detach, all per-context resources associated with the LUN
 * are cleaned up. When detaching the last LUN for a context, the context
 * itself is cleaned up and released.
 *
 * Return: 0 on success, -errno on failure
 */
static int _cxlflash_disk_detach(struct scsi_device *sdev,
                                 struct ctx_info *ctxi,
                                 struct dk_cxlflash_detach *detach)
{
        struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct lun_access *lun_access, *t;
        struct dk_cxlflash_release rel;
        bool put_ctx = false;

        int i;
        int rc = 0;
        u64 ctxid = DECODE_CTXID(detach->context_id),
            rctxid = detach->context_id;

        dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);

        if (!ctxi) {
                ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
                if (unlikely(!ctxi)) {
                        dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
                                __func__, ctxid);
                        rc = -EINVAL;
                        goto out;
                }

                put_ctx = true;
        }

        /* Cleanup outstanding resources tied to this LUN */
        if (ctxi->rht_out) {
                marshal_det_to_rele(detach, &rel);
                for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
                        if (ctxi->rht_lun[i] == lli) {
                                rel.rsrc_handle = i;
                                _cxlflash_disk_release(sdev, ctxi, &rel);
                        }

                        /* No need to loop further if we're done */
                        if (ctxi->rht_out == 0)
                                break;
                }
        }

        /* Take our LUN out of context, free the node */
        list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
                if (lun_access->lli == lli) {
                        list_del(&lun_access->list);
                        kfree(lun_access);
                        lun_access = NULL;
                        break;
                }

        /*
         * Release the context reference and the sdev reference that
         * bound this LUN to the context.
         */
        if (kref_put(&ctxi->kref, remove_context))
                put_ctx = false;
        scsi_device_put(sdev);
out:
        if (put_ctx)
                put_context(ctxi);
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

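/**
 * cxlflash_disk_detach() - detaches a LUN from a context
 * @sdev: SCSI device associated with LUN.
 * @detach: Detach ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */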
static int cxlflash_disk_detach(struct scsi_device *sdev,
                                struct dk_cxlflash_detach *detach)
{
        return _cxlflash_disk_detach(sdev, NULL, detach);
}

/**
 * cxlflash_cxl_release() - release handler for adapter file descriptor
 * @inode: File-system inode associated with fd.
 * @file: File installed with adapter file descriptor.
 *
 * This routine is the release handler for the fops registered with
 * the CXL services on an initial attach for a context. It is called
 * when a close (explicitly by the user or as part of a process tear
 * down) is performed on the adapter file descriptor returned to the
 * user. The user should be aware that an explicit close is considered
 * catastrophic and subsequent usage of the superpipe API with
 * previously saved off tokens will fail.
 *
 * This routine derives the context reference and calls detach for
 * each LUN associated with the context. The final detach operation
 * causes the context itself to be freed. With exception to when the
 * CXL process element (context id) lookup fails (a case that should
 * theoretically never occur), every call into this routine results
 * in a complete freeing of a context.
 *
 * Detaching the LUN is typically an ioctl() operation and the underlying
 * code assumes that ioctl_rwsem has been acquired as a reader. To support
 * that design point, the semaphore is acquired and released around detach.
 *
 * Return: 0 on success
 */
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
        struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
                                                cxl_fops);
        void *ctx = cfg->ops->fops_get_context(file);
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        struct dk_cxlflash_detach detach = { { 0 }, 0 };
        struct lun_access *lun_access, *t;
        enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
        int ctxid;

        ctxid = cfg->ops->process_element(ctx);
        if (unlikely(ctxid < 0)) {
                dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
                        __func__, ctx, ctxid);
                goto out;
        }

        ctxi = get_context(cfg, ctxid, file, ctrl);
        if (unlikely(!ctxi)) {
                ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
                if (!ctxi) {
                        dev_dbg(dev, "%s: ctxid=%d already free\n",
                                __func__, ctxid);
                        goto out_release;
                }

                dev_dbg(dev, "%s: Another process owns ctxid=%d\n",
                        __func__, ctxid);
                put_context(ctxi);
                goto out;
        }

        dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);

        down_read(&cfg->ioctl_rwsem);
        detach.context_id = ctxi->ctxid;
        list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
                _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
        up_read(&cfg->ioctl_rwsem);
out_release:
        cfg->ops->fd_release(inode, file);
out:
        dev_dbg(dev, "%s: returning\n", __func__);
        return 0;
}

/**
 * unmap_context() - clears a previously established mapping
 * @ctxi: Context owning the mapping.
 *
 * This routine is used to switch between the error notification page
 * (dummy page of all 1's) and the real mapping (established by the CXL
 * fault handler).
 */
static void unmap_context(struct ctx_info *ctxi)
{
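        /* Zap PTEs; the next access refaults into cxlflash_mmap_fault() */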
        unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
}

/**
 * get_err_page() - obtains and allocates the error notification page
 * @cfg: Internal structure associated with the host.
 *
 * Return: error notification page on success, NULL on failure
 */
static struct page *get_err_page(struct cxlflash_cfg *cfg)
{
        struct page *err_page = global.err_page;
        struct device *dev = &cfg->dev->dev;

        if (unlikely(!err_page)) {
                err_page = alloc_page(GFP_KERNEL);
                if (unlikely(!err_page)) {
                        dev_err(dev, "%s: Unable to allocate err_page\n",
                                __func__);
                        goto out;
                }

                memset(page_address(err_page), -1, PAGE_SIZE);

                /* Serialize update w/ other threads to avoid a leak */
                mutex_lock(&global.mutex);
                if (likely(!global.err_page))
                        global.err_page = err_page;
                else {
                        __free_page(err_page);
                        err_page = global.err_page;
                }
                mutex_unlock(&global.mutex);
        }

out:
        dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page);
        return err_page;
}

/**
 * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
 * @vmf: VM fault associated with current fault.
 *
 * To support error notification via MMIO, faults are 'caught' by this routine
 * that was inserted before passing back the adapter file descriptor on attach.
 * When a fault occurs, this routine evaluates if error recovery is active and
 * if so, installs the error page to 'notify' the user about the error state.
 * During normal operation, the fault is simply handled by the original fault
 * handler that was installed by CXL services as part of initializing the
 * adapter file descriptor. The VMA's page protection bits are toggled to
 * indicate cached/not-cached depending on the memory backing the fault.
 *
 * Return: 0 on success, VM_FAULT_SIGBUS on failure
 */
static vm_fault_t cxlflash_mmap_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct file *file = vma->vm_file;
        struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
                                                cxl_fops);
        void *ctx = cfg->ops->fops_get_context(file);
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        struct page *err_page = NULL;
        enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
        vm_fault_t rc = 0;
        int ctxid;

        ctxid = cfg->ops->process_element(ctx);
        if (unlikely(ctxid < 0)) {
                dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
                        __func__, ctx, ctxid);
                goto err;
        }

        ctxi = get_context(cfg, ctxid, file, ctrl);
        if (unlikely(!ctxi)) {
                dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
                goto err;
        }

        dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);

        if (likely(!ctxi->err_recovery_active)) {
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                rc = ctxi->cxl_mmap_vmops->fault(vmf);
        } else {
                dev_dbg(dev, "%s: err recovery active, use err_page\n",
                        __func__);

                err_page = get_err_page(cfg);
                if (unlikely(!err_page)) {
                        dev_err(dev, "%s: Could not get err_page\n", __func__);
                        rc = VM_FAULT_RETRY;
                        goto out;
                }

                get_page(err_page);
                vmf->page = err_page;
                vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
        }

out:
        if (likely(ctxi))
                put_context(ctxi);
        dev_dbg(dev, "%s: returning rc=%x\n", __func__, rc);
        return rc;

err:
        rc = VM_FAULT_SIGBUS;
        goto out;
}

/*
 * Local MMAP vmops to 'catch' faults
 */
static const struct vm_operations_struct cxlflash_mmap_vmops = {
        .fault = cxlflash_mmap_fault,
};

/**
 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
 * @file: File installed with adapter file descriptor.
 * @vma: VM area associated with mapping.
 *
 * Installs local mmap vmops to 'catch' faults for error notification support.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
                                                cxl_fops);
        void *ctx = cfg->ops->fops_get_context(file);
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
        int ctxid;
        int rc = 0;

        ctxid = cfg->ops->process_element(ctx);
        if (unlikely(ctxid < 0)) {
                dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
                        __func__, ctx, ctxid);
                rc = -EIO;
                goto out;
        }

        ctxi = get_context(cfg, ctxid, file, ctrl);
        if (unlikely(!ctxi)) {
                dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
                rc = -EIO;
                goto out;
        }

        dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);

        rc = cfg->ops->fd_mmap(file, vma);
        if (likely(!rc)) {
                /* Insert ourself in the mmap fault handler path */
                ctxi->cxl_mmap_vmops = vma->vm_ops;
                vma->vm_ops = &cxlflash_mmap_vmops;
        }

out:
        if (likely(ctxi))
                put_context(ctxi);
        return rc;
}

const struct file_operations cxlflash_cxl_fops = {
        .owner = THIS_MODULE,
        .mmap = cxlflash_cxl_mmap,
        .release = cxlflash_cxl_release,
};

/**
 * cxlflash_mark_contexts_error() - move contexts to error state and list
 * @cfg: Internal structure associated with the host.
 *
 * A context is only moved over to the error list when there are no outstanding
 * references to it. This ensures that a running operation has completed.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
{
        int i, rc = 0;
        struct ctx_info *ctxi = NULL;

        mutex_lock(&cfg->ctx_tbl_list_mutex);

        for (i = 0; i < MAX_CONTEXT; i++) {
                ctxi = cfg->ctx_tbl[i];
                if (ctxi) {
                        mutex_lock(&ctxi->mutex);
                        cfg->ctx_tbl[i] = NULL;
                        list_add(&ctxi->list, &cfg->ctx_err_recovery);
                        ctxi->err_recovery_active = true;
                        ctxi->ctrl_map = NULL;
                        unmap_context(ctxi);
                        mutex_unlock(&ctxi->mutex);
                }
        }

        mutex_unlock(&cfg->ctx_tbl_list_mutex);
        return rc;
}

/*
 * Dummy NULL fops
 */
static const struct file_operations null_fops = {
        .owner = THIS_MODULE,
};

/**
 * check_state() - checks and responds to the current adapter state
 * @cfg: Internal structure associated with the host.
 *
 * This routine can block and should only be used in process context.
 * It assumes that the caller is an ioctl thread and holding the ioctl
 * read semaphore. This is temporarily let up across the wait to allow
 * for draining actively running ioctls. Also note that when waking up
 * from waiting in reset, the state is unknown and must be checked again
 * before proceeding.
 *
 * Return: 0 on success, -errno on failure
 */
int check_state(struct cxlflash_cfg *cfg)
{
        struct device *dev = &cfg->dev->dev;
        int rc = 0;

retry:
        switch (cfg->state) {
        case STATE_RESET:
                dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
                up_read(&cfg->ioctl_rwsem);
                rc = wait_event_interruptible(cfg->reset_waitq,
                                              cfg->state != STATE_RESET);
                down_read(&cfg->ioctl_rwsem);
                if (unlikely(rc))
                        break;
                goto retry;
        case STATE_FAILTERM:
                dev_dbg(dev, "%s: Failed/Terminating\n", __func__);
                rc = -ENODEV;
                break;
        default:
                break;
        }

        return rc;
}
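
/*
 * Canonical caller pattern, as a sketch (read_cap16() above follows it):
 *
 *      up_read(&cfg->ioctl_rwsem);
 *      ... lengthy operation, e.g. scsi_execute() ...
 *      down_read(&cfg->ioctl_rwsem);
 *      rc = check_state(cfg);
 */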
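
/*
 * For orientation, a rough userspace flow for attach (sketch only; the
 * device node and values are illustrative, not prescriptive):
 *
 *      fd = open("/dev/sdc", O_RDWR);
 *      attach.num_interrupts = 4;
 *      rc = ioctl(fd, DK_CXLFLASH_ATTACH, &attach);
 *      mmio = mmap(NULL, attach.mmio_size, PROT_READ | PROT_WRITE,
 *                  MAP_SHARED, attach.adap_fd, 0);
 */
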
/**
 * cxlflash_disk_attach() - attach a LUN to a context
 * @sdev: SCSI device associated with LUN.
 * @attach: Attach ioctl data structure.
 *
 * Creates a context and attaches LUN to it. A LUN can only be attached
 * one time to a context (subsequent attaches for the same context/LUN pair
 * are not supported). Additional LUNs can be attached to a context by
 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_attach(struct scsi_device *sdev,
                                struct dk_cxlflash_attach *attach)
{
        struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
        struct ctx_info *ctxi = NULL;
        struct lun_access *lun_access = NULL;
        int rc = 0;
        u32 perms;
        int ctxid = -1;
        u64 irqs = attach->num_interrupts;
        u64 flags = 0UL;
        u64 rctxid = 0UL;
        struct file *file = NULL;

        void *ctx = NULL;

        int fd = -1;

        if (irqs > 4) {
                dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
                        __func__, irqs);
                rc = -EINVAL;
                goto out;
        }

        if (gli->max_lba == 0) {
                dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n",
                        __func__, lli->lun_id[sdev->channel]);
                rc = read_cap16(sdev, lli);
                if (rc) {
                        dev_err(dev, "%s: Invalid device rc=%d\n",
                                __func__, rc);
                        rc = -ENODEV;
                        goto out;
                }
                dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba);
                dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len);
        }

        if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
                rctxid = attach->context_id;
                ctxi = get_context(cfg, rctxid, NULL, 0);
                if (!ctxi) {
                        dev_dbg(dev, "%s: Bad context rctxid=%016llx\n",
                                __func__, rctxid);
                        rc = -EINVAL;
                        goto out;
                }

                list_for_each_entry(lun_access, &ctxi->luns, list)
                        if (lun_access->lli == lli) {
                                dev_dbg(dev, "%s: Already attached\n",
                                        __func__);
                                rc = -EINVAL;
                                goto out;
                        }
        }

        rc = scsi_device_get(sdev);
        if (unlikely(rc)) {
                dev_err(dev, "%s: Unable to get sdev reference\n", __func__);
                goto out;
        }

        lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
        if (unlikely(!lun_access)) {
                dev_err(dev, "%s: Unable to allocate lun_access\n", __func__);
                rc = -ENOMEM;
                goto err;
        }

        lun_access->lli = lli;
        lun_access->sdev = sdev;

        /* Non-NULL context indicates reuse (another context reference) */
        if (ctxi) {
                dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n",
                        __func__, rctxid);
                kref_get(&ctxi->kref);
                list_add(&lun_access->list, &ctxi->luns);
                goto out_attach;
        }

        ctxi = create_context(cfg);
        if (unlikely(!ctxi)) {
                dev_err(dev, "%s: Failed to create context ctxid=%d\n",
                        __func__, ctxid);
                rc = -ENOMEM;
                goto err;
        }

        ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
        if (IS_ERR_OR_NULL(ctx)) {
                dev_err(dev, "%s: Could not initialize context %p\n",
                        __func__, ctx);
                rc = -ENODEV;
                goto err;
        }

        rc = cfg->ops->start_work(ctx, irqs);
        if (unlikely(rc)) {
                dev_dbg(dev, "%s: Could not start context rc=%d\n",
                        __func__, rc);
                goto err;
        }

        ctxid = cfg->ops->process_element(ctx);
        if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
                dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
                rc = -EPERM;
                goto err;
        }

        file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
        if (unlikely(fd < 0)) {
                rc = -ENODEV;
                dev_err(dev, "%s: Could not get file descriptor\n", __func__);
                goto err;
        }

        /* Translate read/write O_* flags from fcntl.h to AFU permission bits */
        perms = SISL_RHT_PERM(attach->hdr.flags + 1);

        /* Initialize the context; it is locked and published further below */
        init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);

        rc = afu_attach(cfg, ctxi);
        if (unlikely(rc)) {
                dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
                goto err;
        }

        /*
         * No error paths after this point. Once the fd is installed it's
         * visible to user space and can't be undone safely on this thread.
         * There is no need to worry about a deadlock here because no one
         * knows about us yet; we can be the only one holding our mutex.
         */
        list_add(&lun_access->list, &ctxi->luns);
        mutex_lock(&cfg->ctx_tbl_list_mutex);
        mutex_lock(&ctxi->mutex);
        cfg->ctx_tbl[ctxid] = ctxi;
        mutex_unlock(&cfg->ctx_tbl_list_mutex);
        fd_install(fd, file);

out_attach:
        if (fd != -1)
                flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
        if (afu_is_sq_cmd_mode(afu))
                flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;

        attach->hdr.return_flags = flags;
        attach->context_id = ctxi->ctxid;
        attach->block_size = gli->blk_len;
        attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
        attach->last_lba = gli->max_lba;
        attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
        attach->max_xfer /= gli->blk_len;

out:
        attach->adap_fd = fd;

        if (ctxi)
                put_context(ctxi);

        dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
                __func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
        return rc;

err:
        /* Cleanup CXL context; okay to 'stop' even if it was not started */
        if (!IS_ERR_OR_NULL(ctx)) {
                cfg->ops->stop_context(ctx);
                cfg->ops->release_context(ctx);
                ctx = NULL;
        }

        /*
         * Here, we're overriding the fops with a dummy all-NULL fops because
         * fput() calls the release fop, which will cause us to mistakenly
         * call into the CXL code. Rather than try to add yet more complexity
         * to that routine (cxlflash_cxl_release) we should try to fix the
         * issue here.
         */
        if (fd > 0) {
                file->f_op = &null_fops;
                fput(file);
                put_unused_fd(fd);
                fd = -1;
                file = NULL;
        }

        /* Cleanup our context */
        if (ctxi) {
                destroy_context(cfg, ctxi);
                ctxi = NULL;
        }

        kfree(lun_access);
        scsi_device_put(sdev);
        goto out;
}

/**
 * recover_context() - recovers a context in error
 * @cfg: Internal structure associated with the host.
 * @ctxi: Context to recover.
 * @adap_fd: Adapter file descriptor associated with new/recovered context.
 *
 * Re-establishes the state for a context-in-error.
1529 *
1530 * Return: 0 on success, -errno on failure
1531 */
recover_context(struct cxlflash_cfg * cfg,struct ctx_info * ctxi,int * adap_fd)1532 static int recover_context(struct cxlflash_cfg *cfg,
1533 struct ctx_info *ctxi,
1534 int *adap_fd)
1535 {
1536 struct device *dev = &cfg->dev->dev;
1537 int rc = 0;
1538 int fd = -1;
1539 int ctxid = -1;
1540 struct file *file;
1541 void *ctx;
1542 struct afu *afu = cfg->afu;
1543
1544 ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
1545 if (IS_ERR_OR_NULL(ctx)) {
1546 dev_err(dev, "%s: Could not initialize context %p\n",
1547 __func__, ctx);
1548 rc = -ENODEV;
1549 goto out;
1550 }
1551
1552 rc = cfg->ops->start_work(ctx, ctxi->irqs);
1553 if (unlikely(rc)) {
1554 dev_dbg(dev, "%s: Could not start context rc=%d\n",
1555 __func__, rc);
1556 goto err1;
1557 }
1558
1559 ctxid = cfg->ops->process_element(ctx);
1560 if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
1561 dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
1562 rc = -EPERM;
1563 goto err2;
1564 }
1565
1566 file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
1567 if (unlikely(fd < 0)) {
1568 rc = -ENODEV;
1569 dev_err(dev, "%s: Could not get file descriptor\n", __func__);
1570 goto err2;
1571 }
1572
1573 /* Update with new MMIO area based on updated context id */
1574 ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
1575
1576 rc = afu_attach(cfg, ctxi);
1577 if (rc) {
1578 dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
1579 goto err3;
1580 }
1581
1582 /*
1583 * No error paths after this point. Once the fd is installed it's
1584 * visible to user space and can't be undone safely on this thread.
1585 */
1586 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
1587 ctxi->ctx = ctx;
1588 ctxi->file = file;
1589
1590 /*
1591 * Put context back in table (note the reinit of the context list);
1592 * we must first drop the context's mutex and then acquire it in
1593 * order with the table/list mutex to avoid a deadlock - safe to do
1594 * here because no one can find us at this moment in time.
1595 */
1596 mutex_unlock(&ctxi->mutex);
1597 mutex_lock(&cfg->ctx_tbl_list_mutex);
1598 mutex_lock(&ctxi->mutex);
1599 list_del_init(&ctxi->list);
1600 cfg->ctx_tbl[ctxid] = ctxi;
1601 mutex_unlock(&cfg->ctx_tbl_list_mutex);
1602 fd_install(fd, file);
1603 *adap_fd = fd;
1604 out:
1605 dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
1606 __func__, ctxid, fd, rc);
1607 return rc;
1608
1609 err3:
1610 fput(file);
1611 put_unused_fd(fd);
1612 err2:
1613 cfg->ops->stop_context(ctx);
1614 err1:
1615 cfg->ops->release_context(ctx);
1616 goto out;
1617 }
1618
1619 /**
1620 * cxlflash_afu_recover() - initiates AFU recovery
1621 * @sdev: SCSI device associated with LUN.
1622 * @recover: Recover ioctl data structure.
1623 *
1624 * Only a single recovery is allowed at a time to avoid exhausting CXL
1625 * resources (leading to recovery failure) in the event that we're up
1626 * against the maximum number of contexts limit. For similar reasons,
1627 * a context recovery is retried if there are multiple recoveries taking
1628 * place at the same time and the failure was due to CXL services being
1629 * unable to keep up.
1630 *
1631 * As this routine is called on ioctl context, it holds the ioctl r/w
1632 * semaphore that is used to drain ioctls in recovery scenarios. The
1633 * implementation to achieve the pacing described above (a local mutex)
1634 * requires that the ioctl r/w semaphore be dropped and reacquired to
1635 * avoid a 3-way deadlock when multiple process recoveries operate in
1636 * parallel.
1637 *
1638 * Because a user can detect an error condition before the kernel, it is
1639 * quite possible for this routine to act as the kernel's EEH detection
1640 * source (MMIO read of mbox_r). Because of this, there is a window of
1641 * time where an EEH might have been detected but not yet 'serviced'
1642 * (callback invoked, causing the device to enter reset state). To avoid
1643 * looping in this routine during that window, a 1 second sleep is in place
1644 * between the time the MMIO failure is detected and the time a wait on the
1645 * reset wait queue is attempted via check_state().
1646 *
1647 * Return: 0 on success, -errno on failure
1648 */
cxlflash_afu_recover(struct scsi_device * sdev,struct dk_cxlflash_recover_afu * recover)1649 static int cxlflash_afu_recover(struct scsi_device *sdev,
1650 struct dk_cxlflash_recover_afu *recover)
1651 {
1652 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1653 struct device *dev = &cfg->dev->dev;
1654 struct llun_info *lli = sdev->hostdata;
1655 struct afu *afu = cfg->afu;
1656 struct ctx_info *ctxi = NULL;
1657 struct mutex *mutex = &cfg->ctx_recovery_mutex;
1658 struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
1659 u64 flags;
1660 u64 ctxid = DECODE_CTXID(recover->context_id),
1661 rctxid = recover->context_id;
1662 long reg;
1663 bool locked = true;
1664 int lretry = 20; /* up to 2 seconds */
1665 int new_adap_fd = -1;
1666 int rc = 0;
1667
1668 atomic_inc(&cfg->recovery_threads);
1669 up_read(&cfg->ioctl_rwsem);
1670 rc = mutex_lock_interruptible(mutex);
1671 down_read(&cfg->ioctl_rwsem);
1672 if (rc) {
1673 locked = false;
1674 goto out;
1675 }
1676
1677 rc = check_state(cfg);
1678 if (rc) {
1679 dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
1680 rc = -ENODEV;
1681 goto out;
1682 }
1683
1684 dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n",
1685 __func__, recover->reason, rctxid);
1686
1687 retry:
1688 /* Ensure that this process is attached to the context */
1689 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
1690 if (unlikely(!ctxi)) {
1691 dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
1692 rc = -EINVAL;
1693 goto out;
1694 }
1695
1696 if (ctxi->err_recovery_active) {
1697 retry_recover:
1698 rc = recover_context(cfg, ctxi, &new_adap_fd);
1699 if (unlikely(rc)) {
1700 dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n",
1701 __func__, ctxid, rc);
1702 if ((rc == -ENODEV) &&
1703 ((atomic_read(&cfg->recovery_threads) > 1) ||
1704 (lretry--))) {
1705 dev_dbg(dev, "%s: Going to try again\n",
1706 __func__);
1707 mutex_unlock(mutex);
1708 msleep(100);
1709 rc = mutex_lock_interruptible(mutex);
1710 if (rc) {
1711 locked = false;
1712 goto out;
1713 }
1714 goto retry_recover;
1715 }
1716
1717 goto out;
1718 }
1719
1720 ctxi->err_recovery_active = false;
1721
1722 flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
1723 DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
1724 if (afu_is_sq_cmd_mode(afu))
1725 flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
1726
1727 recover->hdr.return_flags = flags;
1728 recover->context_id = ctxi->ctxid;
1729 recover->adap_fd = new_adap_fd;
1730 recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
1731 goto out;
1732 }
1733
1734 /* Test if in error state */
1735 reg = readq_be(&hwq->ctrl_map->mbox_r);
1736 if (reg == -1) {
1737 dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);
1738
1739 /*
1740 * Before checking the state, put back the context obtained with
1741 * get_context() as it is no longer needed and sleep for a short
1742 * period of time (see prolog notes).
1743 */
1744 put_context(ctxi);
1745 ctxi = NULL;
1746 ssleep(1);
1747 rc = check_state(cfg);
1748 if (unlikely(rc))
1749 goto out;
1750 goto retry;
1751 }
1752
1753 dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__);
1754 out:
1755 if (likely(ctxi))
1756 put_context(ctxi);
1757 if (locked)
1758 mutex_unlock(mutex);
1759 atomic_dec_if_positive(&cfg->recovery_threads);
1760 return rc;
1761 }

/**
 * process_sense() - evaluates and processes sense data
 * @sdev: SCSI device associated with LUN.
 * @verify: Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int process_sense(struct scsi_device *sdev,
			 struct dk_cxlflash_verify *verify)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	u64 prev_lba = gli->max_lba;
	struct scsi_sense_hdr sshdr = { 0 };
	int rc = 0;

	rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
				  DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
	if (!rc) {
		dev_err(dev, "%s: Failed to normalize sense data\n", __func__);
		rc = -EINVAL;
		goto out;
	}

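	/*
	 * Dispatch on the normalized sense: benign keys (no sense,
	 * recovered error, not ready) are ignored; a unit attention for a
	 * power-on/device reset or a capacity change prompts a capacity
	 * re-read, a 'report LUNs changed' attention prompts a host
	 * rescan, and anything else fails the verify with -EIO.
	 */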
	switch (sshdr.sense_key) {
	case NO_SENSE:
	case RECOVERED_ERROR:
	case NOT_READY:
		break;
	case UNIT_ATTENTION:
		switch (sshdr.asc) {
		case 0x29: /* Power on Reset or Device Reset */
			fallthrough;
		case 0x2A: /* Device settings/capacity changed */
			rc = read_cap16(sdev, lli);
			if (rc) {
				rc = -ENODEV;
				break;
			}
			if (prev_lba != gli->max_lba)
				dev_dbg(dev, "%s: Capacity changed old=%lld "
					"new=%lld\n", __func__, prev_lba,
					gli->max_lba);
			break;
		case 0x3F: /* Report LUNs changed, Rescan. */
			scsi_scan_host(cfg->host);
			break;
		default:
			rc = -EIO;
			break;
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
		sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
	return rc;
}

/**
 * cxlflash_disk_verify() - verifies a LUN is the same and handles size changes
 * @sdev: SCSI device associated with LUN.
 * @verify: Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_verify(struct scsi_device *sdev,
				struct dk_cxlflash_verify *verify)
{
	int rc = 0;
	struct ctx_info *ctxi = NULL;
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct sisl_rht_entry *rhte = NULL;
	res_hndl_t rhndl = verify->rsrc_handle;
	u64 ctxid = DECODE_CTXID(verify->context_id),
	    rctxid = verify->context_id;
	u64 last_lba = 0;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, "
		"flags=%016llx\n", __func__, ctxid, verify->rsrc_handle,
		verify->hint, verify->hdr.flags);

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Look at the hint/sense to see if it requires us to redrive
	 * inquiry (i.e. the Unit attention is due to the WWN changing).
	 */
	if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
		/* Can't hold mutex across process_sense/read_cap16,
		 * since we could have an intervening EEH event.
		 */
		ctxi->unavail = true;
		mutex_unlock(&ctxi->mutex);
		rc = process_sense(sdev, verify);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Failed to validate sense data (%d)\n",
				__func__, rc);
			mutex_lock(&ctxi->mutex);
			ctxi->unavail = false;
			goto out;
		}
		mutex_lock(&ctxi->mutex);
		ctxi->unavail = false;
	}
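
	/*
	 * For a virtual LUN (MODE_VIRTUAL below) the last LBA is derived
	 * from the allocated translation entries: lxt_cnt chunks of
	 * MC_CHUNK_SIZE blocks at the LUN's native block length, scaled to
	 * the CXLFLASH_BLOCK_SIZE presented to the user. Illustration
	 * (values assumed): with MC_CHUNK_SIZE = 256 on a 4K-sector LUN
	 * and a 4K CXLFLASH_BLOCK_SIZE, lxt_cnt = 2 yields
	 * 2 * 256 * 4096 / 4096 = 512 blocks, i.e. last_lba = 511.
	 */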

	switch (gli->mode) {
	case MODE_PHYSICAL:
		last_lba = gli->max_lba;
		break;
	case MODE_VIRTUAL:
		/* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
		last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
		last_lba /= CXLFLASH_BLOCK_SIZE;
		last_lba--;
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
	}

	verify->last_lba = last_lba;

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d llba=%llx\n",
		__func__, rc, verify->last_lba);
	return rc;
}

/**
 * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
 * @cmd: The ioctl command to decode.
 *
 * Return: A string identifying the decoded ioctl.
 */
static char *decode_ioctl(unsigned int cmd)
{
	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
		return __stringify_1(DK_CXLFLASH_ATTACH);
	case DK_CXLFLASH_USER_DIRECT:
		return __stringify_1(DK_CXLFLASH_USER_DIRECT);
	case DK_CXLFLASH_USER_VIRTUAL:
		return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
	case DK_CXLFLASH_VLUN_RESIZE:
		return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
	case DK_CXLFLASH_RELEASE:
		return __stringify_1(DK_CXLFLASH_RELEASE);
	case DK_CXLFLASH_DETACH:
		return __stringify_1(DK_CXLFLASH_DETACH);
	case DK_CXLFLASH_VERIFY:
		return __stringify_1(DK_CXLFLASH_VERIFY);
	case DK_CXLFLASH_VLUN_CLONE:
		return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
	case DK_CXLFLASH_RECOVER_AFU:
		return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
	case DK_CXLFLASH_MANAGE_LUN:
		return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
	}

	return "UNKNOWN";
}

/**
 * cxlflash_disk_direct_open() - opens a direct (physical) disk
 * @sdev: SCSI device associated with LUN.
 * @arg: UDirect ioctl data structure.
 *
 * On successful return, the user is informed of the resource handle
 * to be used to identify the direct LUN and the size (in blocks) of
 * the direct LUN in last LBA format.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct dk_cxlflash_release rel = { { 0 }, 0 };

	struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;

	u64 ctxid = DECODE_CTXID(pphys->context_id),
	    rctxid = pphys->context_id;
	u64 lun_size = 0;
	u64 last_lba = 0;
	u64 rsrc_handle = -1;
	u32 port = CHAN2PORTMASK(sdev->channel);

	int rc = 0;

	struct ctx_info *ctxi = NULL;
	struct sisl_rht_entry *rhte = NULL;

	dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);

	rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__);
		goto out;
	}

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto err1;
	}

	rhte = rhte_checkout(ctxi, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Too many opens ctxid=%lld\n",
			__func__, ctxid);
		rc = -EMFILE;	/* too many opens */
		goto err1;
	}

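	/*
	 * The resource handle returned to the user is simply the index of
	 * the checked-out entry within this context's resource handle
	 * table.
	 */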
	rsrc_handle = (rhte - ctxi->rht_start);

	rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);

	last_lba = gli->max_lba;
	pphys->hdr.return_flags = 0;
	pphys->last_lba = last_lba;
	pphys->rsrc_handle = rsrc_handle;

	rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc);
		goto err2;
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
		__func__, rsrc_handle, rc, last_lba);
	return rc;

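	/*
	 * Unwind ordering: err2 backs out through the disk release path
	 * (which also undoes the LUN attach), whereas err1 only undoes
	 * the LUN attach taken above.
	 */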
err2:
	marshal_udir_to_rele(pphys, &rel);
	_cxlflash_disk_release(sdev, ctxi, &rel);
	goto out;
err1:
	cxlflash_lun_detach(gli);
	goto out;
}

/**
 * ioctl_common() - common IOCTL handler for driver
 * @sdev: SCSI device associated with LUN.
 * @cmd: IOCTL command.
 *
 * Handles common fencing operations that are valid for multiple ioctls. Always
 * allow through ioctls that are cleanup oriented in nature, even when operating
 * in a failed/terminating state.
 *
 * Return: 0 on success, -errno on failure
 */
static int ioctl_common(struct scsi_device *sdev, unsigned int cmd)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	int rc = 0;

	if (unlikely(!lli)) {
		dev_dbg(dev, "%s: Unknown LUN\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	rc = check_state(cfg);
	if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
		switch (cmd) {
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_DETACH:
			dev_dbg(dev, "%s: Command override rc=%d\n",
				__func__, rc);
			rc = 0;
			break;
		}
	}
out:
	return rc;
}

/**
 * cxlflash_ioctl() - IOCTL handler for driver
 * @sdev: SCSI device associated with LUN.
 * @cmd: IOCTL command.
 * @arg: Userspace ioctl data structure.
 *
 * A read/write semaphore is used to implement a 'drain' of currently
 * running ioctls. The read semaphore is taken at the beginning of each
 * ioctl thread and released upon concluding execution. Additionally the
 * semaphore should be released and then reacquired in any ioctl execution
 * path which will wait for an event to occur that is outside the scope of
 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
 * a thread simply needs to acquire the write semaphore.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
{
	typedef int (*sioctl) (struct scsi_device *, void *);

	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct dk_cxlflash_hdr *hdr;
	char buf[sizeof(union cxlflash_ioctls)];
	size_t size = 0;
	bool known_ioctl = false;
	int idx;
	int rc = 0;
	struct Scsi_Host *shost = sdev->host;
	sioctl do_ioctl = NULL;

	static const struct {
		size_t size;
		sioctl ioctl;
	} ioctl_tbl[] = {	/* NOTE: order matters here */
	{sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
	{sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
	{sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
	{sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
	{sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
	{sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
	{sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
	{sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
	{sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
	{sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
	};
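
	/*
	 * The table above must list entries in exactly the same order as
	 * the DK_CXLFLASH_* ioctl numbers are defined; the dispatch below
	 * indexes it directly via _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH).
	 */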

	/* Hold read semaphore so we can drain if needed */
	down_read(&cfg->ioctl_rwsem);

	/* Restrict command set to physical support only for internal LUN */
	if (afu->internal_lun)
		switch (cmd) {
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_USER_VIRTUAL:
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_VLUN_CLONE:
			dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
				__func__, decode_ioctl(cmd), afu->internal_lun);
			rc = -EINVAL;
			goto cxlflash_ioctl_exit;
		}

	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
	case DK_CXLFLASH_USER_DIRECT:
	case DK_CXLFLASH_RELEASE:
	case DK_CXLFLASH_DETACH:
	case DK_CXLFLASH_VERIFY:
	case DK_CXLFLASH_RECOVER_AFU:
	case DK_CXLFLASH_USER_VIRTUAL:
	case DK_CXLFLASH_VLUN_RESIZE:
	case DK_CXLFLASH_VLUN_CLONE:
		dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
			__func__, decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun);
		rc = ioctl_common(sdev, cmd);
		if (unlikely(rc))
			goto cxlflash_ioctl_exit;

		fallthrough;

	case DK_CXLFLASH_MANAGE_LUN:
		known_ioctl = true;
		idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
		size = ioctl_tbl[idx].size;
		do_ioctl = ioctl_tbl[idx].ioctl;

		if (likely(do_ioctl))
			break;

		fallthrough;
	default:
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (unlikely(copy_from_user(&buf, arg, size))) {
		dev_err(dev, "%s: copy_from_user() fail size=%lu cmd=%u (%s) arg=%p\n",
			__func__, size, cmd, decode_ioctl(cmd), arg);
		rc = -EFAULT;
		goto cxlflash_ioctl_exit;
	}

	hdr = (struct dk_cxlflash_hdr *)&buf;
	if (hdr->version != DK_CXLFLASH_VERSION_0) {
		dev_dbg(dev, "%s: Version %u not supported for %s\n",
			__func__, hdr->version, decode_ioctl(cmd));
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
		dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	rc = do_ioctl(sdev, (void *)&buf);
	if (likely(!rc))
		if (unlikely(copy_to_user(arg, &buf, size))) {
			dev_err(dev, "%s: copy_to_user() fail size=%lu cmd=%u (%s) arg=%p\n",
				__func__, size, cmd, decode_ioctl(cmd), arg);
			rc = -EFAULT;
		}

	/* fall through to exit */

cxlflash_ioctl_exit:
	up_read(&cfg->ioctl_rwsem);
	if (unlikely(rc && known_ioctl))
		dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__,
			decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun, rc);
	else
		dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__, decode_ioctl(cmd),
			cmd, shost->host_no, sdev->channel, sdev->id,
			sdev->lun, rc);
	return rc;
}

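/*
 * Userspace note (illustrative sketch, not part of this file): every
 * cxlflash ioctl payload begins with a struct dk_cxlflash_hdr that must
 * carry a supported version and zeroed reserved/return_flags fields, or
 * the ioctl is rejected with -EINVAL above. The device path below is an
 * assumption for illustration.
 *
 *	struct dk_cxlflash_attach attach = { 0 };
 *	int fd, rc;
 *
 *	attach.hdr.version = DK_CXLFLASH_VERSION_0;	// rsvd/return_flags
 *							// stay zero
 *	fd = open("/dev/sdX", O_RDWR);			// assumed target
 *	rc = ioctl(fd, DK_CXLFLASH_ATTACH, &attach);
 */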