1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 
45 #include <net/checksum.h>
46 
47 #include <asm/unaligned.h>
48 
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
57 
58 #include "sd.h"
59 #include "scsi_logging.h"
60 
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
63 static const char *sdebug_version_date = "20210520";
64 
65 #define MY_NAME "scsi_debug"
66 
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define WRITE_PROTECTED 0x27
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define POWER_ON_OCCURRED_ASCQ 0x1
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define ATTEMPT_ACCESS_GAP 0x9
102 #define INSUFF_ZONE_ASCQ 0xe
103 
104 /* Additional Sense Code Qualifier (ASCQ) */
105 #define ACK_NAK_TO 0x3
106 
107 /* Default values for driver parameters */
108 #define DEF_NUM_HOST   1
109 #define DEF_NUM_TGTS   1
110 #define DEF_MAX_LUNS   1
111 /* With these defaults, this driver will make 1 host with 1 target
112  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
113  */
114 #define DEF_ATO 1
115 #define DEF_CDB_LEN 10
116 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
117 #define DEF_DEV_SIZE_PRE_INIT   0
118 #define DEF_DEV_SIZE_MB   8
119 #define DEF_ZBC_DEV_SIZE_MB   128
120 #define DEF_DIF 0
121 #define DEF_DIX 0
122 #define DEF_PER_HOST_STORE false
123 #define DEF_D_SENSE   0
124 #define DEF_EVERY_NTH   0
125 #define DEF_FAKE_RW	0
126 #define DEF_GUARD 0
127 #define DEF_HOST_LOCK 0
128 #define DEF_LBPU 0
129 #define DEF_LBPWS 0
130 #define DEF_LBPWS10 0
131 #define DEF_LBPRZ 1
132 #define DEF_LOWEST_ALIGNED 0
133 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
134 #define DEF_NO_LUN_0   0
135 #define DEF_NUM_PARTS   0
136 #define DEF_OPTS   0
137 #define DEF_OPT_BLKS 1024
138 #define DEF_PHYSBLK_EXP 0
139 #define DEF_OPT_XFERLEN_EXP 0
140 #define DEF_PTYPE   TYPE_DISK
141 #define DEF_RANDOM false
142 #define DEF_REMOVABLE false
143 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
144 #define DEF_SECTOR_SIZE 512
145 #define DEF_UNMAP_ALIGNMENT 0
146 #define DEF_UNMAP_GRANULARITY 1
147 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
148 #define DEF_UNMAP_MAX_DESC 256
149 #define DEF_VIRTUAL_GB   0
150 #define DEF_VPD_USE_HOSTNO 1
151 #define DEF_WRITESAME_LENGTH 0xFFFF
152 #define DEF_STRICT 0
153 #define DEF_STATISTICS false
154 #define DEF_SUBMIT_QUEUES 1
155 #define DEF_TUR_MS_TO_READY 0
156 #define DEF_UUID_CTL 0
157 #define JDELAY_OVERRIDDEN -9999
158 
159 /* Default parameters for ZBC drives */
160 #define DEF_ZBC_ZONE_SIZE_MB	128
161 #define DEF_ZBC_MAX_OPEN_ZONES	8
162 #define DEF_ZBC_NR_CONV_ZONES	1
163 
164 #define SDEBUG_LUN_0_VAL 0
165 
166 /* bit mask values for sdebug_opts */
167 #define SDEBUG_OPT_NOISE		1
168 #define SDEBUG_OPT_MEDIUM_ERR		2
169 #define SDEBUG_OPT_TIMEOUT		4
170 #define SDEBUG_OPT_RECOVERED_ERR	8
171 #define SDEBUG_OPT_TRANSPORT_ERR	16
172 #define SDEBUG_OPT_DIF_ERR		32
173 #define SDEBUG_OPT_DIX_ERR		64
174 #define SDEBUG_OPT_MAC_TIMEOUT		128
175 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
176 #define SDEBUG_OPT_Q_NOISE		0x200
177 #define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
178 #define SDEBUG_OPT_RARE_TSF		0x800
179 #define SDEBUG_OPT_N_WCE		0x1000
180 #define SDEBUG_OPT_RESET_NOISE		0x2000
181 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
182 #define SDEBUG_OPT_HOST_BUSY		0x8000
183 #define SDEBUG_OPT_CMD_ABORT		0x10000
184 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
185 			      SDEBUG_OPT_RESET_NOISE)
186 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
187 				  SDEBUG_OPT_TRANSPORT_ERR | \
188 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
189 				  SDEBUG_OPT_SHORT_TRANSFER | \
190 				  SDEBUG_OPT_HOST_BUSY | \
191 				  SDEBUG_OPT_CMD_ABORT)
192 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
193 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
194 
195 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
196  * priority order. In the subset implemented here lower numbers have higher
197  * priority. The UA numbers should be a sequence starting from 0 with
198  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
199 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
200 #define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
201 #define SDEBUG_UA_BUS_RESET 2
202 #define SDEBUG_UA_MODE_CHANGED 3
203 #define SDEBUG_UA_CAPACITY_CHANGED 4
204 #define SDEBUG_UA_LUNS_CHANGED 5
205 #define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
206 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
207 #define SDEBUG_NUM_UAS 8
208 
209 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
210  * sector on read commands: */
211 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
212 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
213 
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215  * (for response) per submit queue at one time. Can be reduced by max_queue
216  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219  * but cannot exceed SDEBUG_CANQUEUE .
220  */
221 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
222 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
224 
225 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
226 #define F_D_IN			1	/* Data-in command (e.g. READ) */
227 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
228 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
229 #define F_D_UNKN		8
230 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
231 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
232 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
233 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
234 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
235 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
236 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
237 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
238 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
239 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
240 
241 /* Useful combinations of the above flags */
242 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
243 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
244 #define FF_SA (F_SA_HIGH | F_SA_LOW)
245 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
246 
247 #define SDEBUG_MAX_PARTS 4
248 
249 #define SDEBUG_MAX_CMD_LEN 32
250 
251 #define SDEB_XA_NOT_IN_USE XA_MARK_1
252 
253 /* Zone types (zbcr05 table 25) */
254 enum sdebug_z_type {
255 	ZBC_ZTYPE_CNV	= 0x1,
256 	ZBC_ZTYPE_SWR	= 0x2,
257 	ZBC_ZTYPE_SWP	= 0x3,
258 	/* ZBC_ZTYPE_SOBR = 0x4, */
259 	ZBC_ZTYPE_GAP	= 0x5,
260 };
261 
262 /* enumeration names taken from table 26, zbcr05 */
263 enum sdebug_z_cond {
264 	ZBC_NOT_WRITE_POINTER	= 0x0,
265 	ZC1_EMPTY		= 0x1,
266 	ZC2_IMPLICIT_OPEN	= 0x2,
267 	ZC3_EXPLICIT_OPEN	= 0x3,
268 	ZC4_CLOSED		= 0x4,
269 	ZC6_READ_ONLY		= 0xd,
270 	ZC5_FULL		= 0xe,
271 	ZC7_OFFLINE		= 0xf,
272 };
273 
274 struct sdeb_zone_state {	/* ZBC: per zone state */
275 	enum sdebug_z_type z_type;
276 	enum sdebug_z_cond z_cond;
277 	bool z_non_seq_resource;
278 	unsigned int z_size;
279 	sector_t z_start;
280 	sector_t z_wp;
281 };
282 
283 struct sdebug_dev_info {
284 	struct list_head dev_list;
285 	unsigned int channel;
286 	unsigned int target;
287 	u64 lun;
288 	uuid_t lu_name;
289 	struct sdebug_host_info *sdbg_host;
290 	unsigned long uas_bm[1];
291 	atomic_t num_in_q;
292 	atomic_t stopped;	/* 1: by SSU, 2: device start */
293 	bool used;
294 
295 	/* For ZBC devices */
296 	enum blk_zoned_model zmodel;
297 	unsigned int zcap;
298 	unsigned int zsize;
299 	unsigned int zsize_shift;
300 	unsigned int nr_zones;
301 	unsigned int nr_conv_zones;
302 	unsigned int nr_seq_zones;
303 	unsigned int nr_imp_open;
304 	unsigned int nr_exp_open;
305 	unsigned int nr_closed;
306 	unsigned int max_open;
307 	ktime_t create_ts;	/* time since bootup that this device was created */
308 	struct sdeb_zone_state *zstate;
309 };
310 
311 struct sdebug_host_info {
312 	struct list_head host_list;
313 	int si_idx;	/* sdeb_store_info (per host) xarray index */
314 	struct Scsi_Host *shost;
315 	struct device dev;
316 	struct list_head dev_info_list;
317 };
318 
319 /* There is an xarray of pointers to this struct's objects, one per host */
320 struct sdeb_store_info {
321 	rwlock_t macc_lck;	/* for atomic media access on this store */
322 	u8 *storep;		/* user data storage (ram) */
323 	struct t10_pi_tuple *dif_storep; /* protection info */
324 	void *map_storep;	/* provisioning map */
325 };
326 
327 #define to_sdebug_host(d)	\
328 	container_of(d, struct sdebug_host_info, dev)
329 
330 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
331 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
332 
333 struct sdebug_defer {
334 	struct hrtimer hrt;
335 	struct execute_work ew;
336 	ktime_t cmpl_ts;/* time since boot to complete this cmd */
337 	int sqa_idx;	/* index of sdebug_queue array */
338 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
339 	int hc_idx;	/* hostwide tag index */
340 	int issuing_cpu;
341 	bool init_hrt;
342 	bool init_wq;
343 	bool init_poll;
344 	bool aborted;	/* true when blk_abort_request() already called */
345 	enum sdeb_defer_type defer_t;
346 };
347 
348 struct sdebug_queued_cmd {
349 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
350 	 * instance indicates this slot is in use.
351 	 */
352 	struct sdebug_defer *sd_dp;
353 	struct scsi_cmnd *a_cmnd;
354 };
355 
356 struct sdebug_queue {
357 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
358 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
359 	spinlock_t qc_lock;
360 	atomic_t blocked;	/* to temporarily stop more being queued */
361 };
362 
363 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
364 static atomic_t sdebug_completions;  /* count of deferred completions */
365 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
366 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
367 static atomic_t sdeb_inject_pending;
368 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
369 
370 struct opcode_info_t {
371 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
372 				/* for terminating element */
373 	u8 opcode;		/* if num_attached > 0, preferred */
374 	u16 sa;			/* service action */
375 	u32 flags;		/* OR-ed set of SDEB_F_* */
376 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
377 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
378 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
379 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
380 };
381 
382 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
383 enum sdeb_opcode_index {
384 	SDEB_I_INVALID_OPCODE =	0,
385 	SDEB_I_INQUIRY = 1,
386 	SDEB_I_REPORT_LUNS = 2,
387 	SDEB_I_REQUEST_SENSE = 3,
388 	SDEB_I_TEST_UNIT_READY = 4,
389 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
390 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
391 	SDEB_I_LOG_SENSE = 7,
392 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
393 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
394 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
395 	SDEB_I_START_STOP = 11,
396 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
397 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
398 	SDEB_I_MAINT_IN = 14,
399 	SDEB_I_MAINT_OUT = 15,
400 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
401 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
402 	SDEB_I_RESERVE = 18,		/* 6, 10 */
403 	SDEB_I_RELEASE = 19,		/* 6, 10 */
404 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
405 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
406 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
407 	SDEB_I_SEND_DIAG = 23,
408 	SDEB_I_UNMAP = 24,
409 	SDEB_I_WRITE_BUFFER = 25,
410 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
411 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
412 	SDEB_I_COMP_WRITE = 28,
413 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
414 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
415 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
416 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
417 };
418 
419 
420 static const unsigned char opcode_ind_arr[256] = {
421 /* 0x0; 0x0->0x1f: 6 byte cdbs */
422 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
423 	    0, 0, 0, 0,
424 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
425 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
426 	    SDEB_I_RELEASE,
427 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
428 	    SDEB_I_ALLOW_REMOVAL, 0,
429 /* 0x20; 0x20->0x3f: 10 byte cdbs */
430 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
431 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
432 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
433 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
434 /* 0x40; 0x40->0x5f: 10 byte cdbs */
435 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
436 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
437 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
438 	    SDEB_I_RELEASE,
439 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
440 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
441 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
442 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
443 	0, SDEB_I_VARIABLE_LEN,
444 /* 0x80; 0x80->0x9f: 16 byte cdbs */
445 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
446 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
447 	0, 0, 0, SDEB_I_VERIFY,
448 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
449 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
450 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
451 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
452 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
453 	     SDEB_I_MAINT_OUT, 0, 0, 0,
454 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
455 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
456 	0, 0, 0, 0, 0, 0, 0, 0,
457 	0, 0, 0, 0, 0, 0, 0, 0,
458 /* 0xc0; 0xc0->0xff: vendor specific */
459 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
460 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
461 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
462 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
463 };
464 
465 /*
466  * The following "response" functions return the SCSI mid-level's 4 byte
467  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
468  * command completion, they can mask their return value with
469  * SDEG_RES_IMMED_MASK .
470  */
471 #define SDEG_RES_IMMED_MASK 0x40000000
472 
473 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
499 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
500 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
501 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
502 
503 static int sdebug_do_add_host(bool mk_new_store);
504 static int sdebug_add_host_helper(int per_host_idx);
505 static void sdebug_do_remove_host(bool the_end);
506 static int sdebug_add_store(void);
507 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
508 static void sdebug_erase_all_stores(bool apart_from_first);
509 
510 /*
511  * The following are overflow arrays for cdbs that "hit" the same index in
512  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
513  * should be placed in opcode_info_arr[], the others should be placed here.
514  */
515 static const struct opcode_info_t msense_iarr[] = {
516 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
517 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
518 };
519 
520 static const struct opcode_info_t mselect_iarr[] = {
521 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
522 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
523 };
524 
525 static const struct opcode_info_t read_iarr[] = {
526 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
527 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
528 	     0, 0, 0, 0} },
529 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
530 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
531 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
532 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
533 	     0xc7, 0, 0, 0, 0} },
534 };
535 
536 static const struct opcode_info_t write_iarr[] = {
537 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
538 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
539 		   0, 0, 0, 0, 0, 0} },
540 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
541 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
542 		   0, 0, 0} },
543 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
544 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
545 		   0xbf, 0xc7, 0, 0, 0, 0} },
546 };
547 
548 static const struct opcode_info_t verify_iarr[] = {
549 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
550 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
551 		   0, 0, 0, 0, 0, 0} },
552 };
553 
554 static const struct opcode_info_t sa_in_16_iarr[] = {
555 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
556 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
557 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
558 };
559 
560 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
561 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
562 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
563 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
564 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
565 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
566 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
567 };
568 
569 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
570 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
571 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
572 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
573 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
574 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
575 	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
576 };
577 
578 static const struct opcode_info_t write_same_iarr[] = {
579 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
580 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
581 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
582 };
583 
584 static const struct opcode_info_t reserve_iarr[] = {
585 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
586 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
587 };
588 
589 static const struct opcode_info_t release_iarr[] = {
590 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
591 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
592 };
593 
594 static const struct opcode_info_t sync_cache_iarr[] = {
595 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
596 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
597 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
598 };
599 
600 static const struct opcode_info_t pre_fetch_iarr[] = {
601 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
602 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
603 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
604 };
605 
606 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
607 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
608 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
609 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
610 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
611 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
612 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
613 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
614 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
615 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
616 };
617 
618 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
619 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
620 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
621 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
622 };
623 
624 
625 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
626  * plus the terminating elements for logic that scans this table such as
627  * REPORT SUPPORTED OPERATION CODES. */
628 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
629 /* 0 */
630 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
631 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
632 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
633 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
634 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
635 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
636 	     0, 0} },					/* REPORT LUNS */
637 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
638 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
639 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
640 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
641 /* 5 */
642 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
643 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
644 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
645 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
646 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
647 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
648 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
649 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
650 	     0, 0, 0} },
651 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
652 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
653 	     0, 0} },
654 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
655 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
656 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
657 /* 10 */
658 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
659 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
660 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
661 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
662 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
663 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
664 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
665 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
666 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
667 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
668 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
669 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
670 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
671 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
672 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
673 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
674 				0xff, 0, 0xc7, 0, 0, 0, 0} },
675 /* 15 */
676 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
677 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
678 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
679 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
680 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
681 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
682 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
683 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
684 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
685 	     0xff, 0xff} },
686 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
687 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
688 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
689 	     0} },
690 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
691 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
692 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
693 	     0} },
694 /* 20 */
695 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
696 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
697 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
698 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
699 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
700 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
701 	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
702 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
703 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
704 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
705 /* 25 */
706 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
707 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
708 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
709 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
710 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
711 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
712 		 0, 0, 0, 0, 0} },
713 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
714 	    resp_sync_cache, sync_cache_iarr,
715 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
716 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
717 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
718 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
719 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
720 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
721 	    resp_pre_fetch, pre_fetch_iarr,
722 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
723 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
724 
725 /* 30 */
726 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
727 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
728 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
729 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
730 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
731 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
732 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
733 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
734 /* sentinel */
735 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
736 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
737 };
738 
739 static int sdebug_num_hosts;
740 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
741 static int sdebug_ato = DEF_ATO;
742 static int sdebug_cdb_len = DEF_CDB_LEN;
743 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
744 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
745 static int sdebug_dif = DEF_DIF;
746 static int sdebug_dix = DEF_DIX;
747 static int sdebug_dsense = DEF_D_SENSE;
748 static int sdebug_every_nth = DEF_EVERY_NTH;
749 static int sdebug_fake_rw = DEF_FAKE_RW;
750 static unsigned int sdebug_guard = DEF_GUARD;
751 static int sdebug_host_max_queue;	/* per host */
752 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
753 static int sdebug_max_luns = DEF_MAX_LUNS;
754 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
755 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
756 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
757 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
758 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
759 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
760 static int sdebug_no_uld;
761 static int sdebug_num_parts = DEF_NUM_PARTS;
762 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
763 static int sdebug_opt_blks = DEF_OPT_BLKS;
764 static int sdebug_opts = DEF_OPTS;
765 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
766 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
767 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
768 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
769 static int sdebug_sector_size = DEF_SECTOR_SIZE;
770 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
771 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
772 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
773 static unsigned int sdebug_lbpu = DEF_LBPU;
774 static unsigned int sdebug_lbpws = DEF_LBPWS;
775 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
776 static unsigned int sdebug_lbprz = DEF_LBPRZ;
777 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
778 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
779 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
780 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
781 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
782 static int sdebug_uuid_ctl = DEF_UUID_CTL;
783 static bool sdebug_random = DEF_RANDOM;
784 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
785 static bool sdebug_removable = DEF_REMOVABLE;
786 static bool sdebug_clustering;
787 static bool sdebug_host_lock = DEF_HOST_LOCK;
788 static bool sdebug_strict = DEF_STRICT;
789 static bool sdebug_any_injecting_opt;
790 static bool sdebug_no_rwlock;
791 static bool sdebug_verbose;
792 static bool have_dif_prot;
793 static bool write_since_sync;
794 static bool sdebug_statistics = DEF_STATISTICS;
795 static bool sdebug_wp;
796 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
797 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
798 static char *sdeb_zbc_model_s;
799 
800 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
801 			  SAM_LUN_AM_FLAT = 0x1,
802 			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
803 			  SAM_LUN_AM_EXTENDED = 0x3};
804 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
805 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
806 
807 static unsigned int sdebug_store_sectors;
808 static sector_t sdebug_capacity;	/* in sectors */
809 
810 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
811    may still need them */
812 static int sdebug_heads;		/* heads per disk */
813 static int sdebug_cylinders_per;	/* cylinders per surface */
814 static int sdebug_sectors_per;		/* sectors per cylinder */
815 
816 static LIST_HEAD(sdebug_host_list);
817 static DEFINE_SPINLOCK(sdebug_host_list_lock);
818 
819 static struct xarray per_store_arr;
820 static struct xarray *per_store_ap = &per_store_arr;
821 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
822 static int sdeb_most_recent_idx = -1;
823 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
824 
825 static unsigned long map_size;
826 static int num_aborts;
827 static int num_dev_resets;
828 static int num_target_resets;
829 static int num_bus_resets;
830 static int num_host_resets;
831 static int dix_writes;
832 static int dix_reads;
833 static int dif_errors;
834 
835 /* ZBC global data */
836 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
837 static int sdeb_zbc_zone_cap_mb;
838 static int sdeb_zbc_zone_size_mb;
839 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
840 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
841 
842 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
843 static int poll_queues; /* iouring iopoll interface.*/
844 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
845 
846 static DEFINE_RWLOCK(atomic_rw);
847 static DEFINE_RWLOCK(atomic_rw2);
848 
849 static rwlock_t *ramdisk_lck_a[2];
850 
851 static char sdebug_proc_name[] = MY_NAME;
852 static const char *my_name = MY_NAME;
853 
854 static struct bus_type pseudo_lld_bus;
855 
856 static struct device_driver sdebug_driverfs_driver = {
857 	.name 		= sdebug_proc_name,
858 	.bus		= &pseudo_lld_bus,
859 };
860 
861 static const int check_condition_result =
862 	SAM_STAT_CHECK_CONDITION;
863 
864 static const int illegal_condition_result =
865 	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
866 
867 static const int device_qfull_result =
868 	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
869 
870 static const int condition_met_result = SAM_STAT_CONDITION_MET;
871 
872 
873 /* Only do the extra work involved in logical block provisioning if one or
874  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
875  * real reads and writes (i.e. not skipping them for speed).
876  */
scsi_debug_lbp(void)877 static inline bool scsi_debug_lbp(void)
878 {
879 	return 0 == sdebug_fake_rw &&
880 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
881 }
882 
lba2fake_store(struct sdeb_store_info * sip,unsigned long long lba)883 static void *lba2fake_store(struct sdeb_store_info *sip,
884 			    unsigned long long lba)
885 {
886 	struct sdeb_store_info *lsip = sip;
887 
888 	lba = do_div(lba, sdebug_store_sectors);
889 	if (!sip || !sip->storep) {
890 		WARN_ON_ONCE(true);
891 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
892 	}
893 	return lsip->storep + lba * sdebug_sector_size;
894 }
895 
dif_store(struct sdeb_store_info * sip,sector_t sector)896 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
897 				      sector_t sector)
898 {
899 	sector = sector_div(sector, sdebug_store_sectors);
900 
901 	return sip->dif_storep + sector;
902 }
903 
sdebug_max_tgts_luns(void)904 static void sdebug_max_tgts_luns(void)
905 {
906 	struct sdebug_host_info *sdbg_host;
907 	struct Scsi_Host *hpnt;
908 
909 	spin_lock(&sdebug_host_list_lock);
910 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
911 		hpnt = sdbg_host->shost;
912 		if ((hpnt->this_id >= 0) &&
913 		    (sdebug_num_tgts > hpnt->this_id))
914 			hpnt->max_id = sdebug_num_tgts + 1;
915 		else
916 			hpnt->max_id = sdebug_num_tgts;
917 		/* sdebug_max_luns; */
918 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
919 	}
920 	spin_unlock(&sdebug_host_list_lock);
921 }
922 
923 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
924 
925 /* Set in_bit to -1 to indicate no bit position of invalid field */
mk_sense_invalid_fld(struct scsi_cmnd * scp,enum sdeb_cmd_data c_d,int in_byte,int in_bit)926 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
927 				 enum sdeb_cmd_data c_d,
928 				 int in_byte, int in_bit)
929 {
930 	unsigned char *sbuff;
931 	u8 sks[4];
932 	int sl, asc;
933 
934 	sbuff = scp->sense_buffer;
935 	if (!sbuff) {
936 		sdev_printk(KERN_ERR, scp->device,
937 			    "%s: sense_buffer is NULL\n", __func__);
938 		return;
939 	}
940 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
941 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
942 	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
943 	memset(sks, 0, sizeof(sks));
944 	sks[0] = 0x80;
945 	if (c_d)
946 		sks[0] |= 0x40;
947 	if (in_bit >= 0) {
948 		sks[0] |= 0x8;
949 		sks[0] |= 0x7 & in_bit;
950 	}
951 	put_unaligned_be16(in_byte, sks + 1);
952 	if (sdebug_dsense) {
953 		sl = sbuff[7] + 8;
954 		sbuff[7] = sl;
955 		sbuff[sl] = 0x2;
956 		sbuff[sl + 1] = 0x6;
957 		memcpy(sbuff + sl + 4, sks, 3);
958 	} else
959 		memcpy(sbuff + 15, sks, 3);
960 	if (sdebug_verbose)
961 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
962 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
963 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
964 }
965 
mk_sense_buffer(struct scsi_cmnd * scp,int key,int asc,int asq)966 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
967 {
968 	if (!scp->sense_buffer) {
969 		sdev_printk(KERN_ERR, scp->device,
970 			    "%s: sense_buffer is NULL\n", __func__);
971 		return;
972 	}
973 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
974 
975 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
976 
977 	if (sdebug_verbose)
978 		sdev_printk(KERN_INFO, scp->device,
979 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
980 			    my_name, key, asc, asq);
981 }
982 
mk_sense_invalid_opcode(struct scsi_cmnd * scp)983 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
984 {
985 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
986 }
987 
scsi_debug_ioctl(struct scsi_device * dev,unsigned int cmd,void __user * arg)988 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
989 			    void __user *arg)
990 {
991 	if (sdebug_verbose) {
992 		if (0x1261 == cmd)
993 			sdev_printk(KERN_INFO, dev,
994 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
995 		else if (0x5331 == cmd)
996 			sdev_printk(KERN_INFO, dev,
997 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
998 				    __func__);
999 		else
1000 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1001 				    __func__, cmd);
1002 	}
1003 	return -EINVAL;
1004 	/* return -ENOTTY; // correct return but upsets fdisk */
1005 }
1006 
config_cdb_len(struct scsi_device * sdev)1007 static void config_cdb_len(struct scsi_device *sdev)
1008 {
1009 	switch (sdebug_cdb_len) {
1010 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1011 		sdev->use_10_for_rw = false;
1012 		sdev->use_16_for_rw = false;
1013 		sdev->use_10_for_ms = false;
1014 		break;
1015 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1016 		sdev->use_10_for_rw = true;
1017 		sdev->use_16_for_rw = false;
1018 		sdev->use_10_for_ms = false;
1019 		break;
1020 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1021 		sdev->use_10_for_rw = true;
1022 		sdev->use_16_for_rw = false;
1023 		sdev->use_10_for_ms = true;
1024 		break;
1025 	case 16:
1026 		sdev->use_10_for_rw = false;
1027 		sdev->use_16_for_rw = true;
1028 		sdev->use_10_for_ms = true;
1029 		break;
1030 	case 32: /* No knobs to suggest this so same as 16 for now */
1031 		sdev->use_10_for_rw = false;
1032 		sdev->use_16_for_rw = true;
1033 		sdev->use_10_for_ms = true;
1034 		break;
1035 	default:
1036 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1037 			sdebug_cdb_len);
1038 		sdev->use_10_for_rw = true;
1039 		sdev->use_16_for_rw = false;
1040 		sdev->use_10_for_ms = false;
1041 		sdebug_cdb_len = 10;
1042 		break;
1043 	}
1044 }
1045 
all_config_cdb_len(void)1046 static void all_config_cdb_len(void)
1047 {
1048 	struct sdebug_host_info *sdbg_host;
1049 	struct Scsi_Host *shost;
1050 	struct scsi_device *sdev;
1051 
1052 	spin_lock(&sdebug_host_list_lock);
1053 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1054 		shost = sdbg_host->shost;
1055 		shost_for_each_device(sdev, shost) {
1056 			config_cdb_len(sdev);
1057 		}
1058 	}
1059 	spin_unlock(&sdebug_host_list_lock);
1060 }
1061 
clear_luns_changed_on_target(struct sdebug_dev_info * devip)1062 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1063 {
1064 	struct sdebug_host_info *sdhp;
1065 	struct sdebug_dev_info *dp;
1066 
1067 	spin_lock(&sdebug_host_list_lock);
1068 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1069 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1070 			if ((devip->sdbg_host == dp->sdbg_host) &&
1071 			    (devip->target == dp->target))
1072 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1073 		}
1074 	}
1075 	spin_unlock(&sdebug_host_list_lock);
1076 }
1077 
make_ua(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)1078 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1079 {
1080 	int k;
1081 
1082 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1083 	if (k != SDEBUG_NUM_UAS) {
1084 		const char *cp = NULL;
1085 
1086 		switch (k) {
1087 		case SDEBUG_UA_POR:
1088 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1089 					POWER_ON_RESET_ASCQ);
1090 			if (sdebug_verbose)
1091 				cp = "power on reset";
1092 			break;
1093 		case SDEBUG_UA_POOCCUR:
1094 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1095 					POWER_ON_OCCURRED_ASCQ);
1096 			if (sdebug_verbose)
1097 				cp = "power on occurred";
1098 			break;
1099 		case SDEBUG_UA_BUS_RESET:
1100 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1101 					BUS_RESET_ASCQ);
1102 			if (sdebug_verbose)
1103 				cp = "bus reset";
1104 			break;
1105 		case SDEBUG_UA_MODE_CHANGED:
1106 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1107 					MODE_CHANGED_ASCQ);
1108 			if (sdebug_verbose)
1109 				cp = "mode parameters changed";
1110 			break;
1111 		case SDEBUG_UA_CAPACITY_CHANGED:
1112 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1113 					CAPACITY_CHANGED_ASCQ);
1114 			if (sdebug_verbose)
1115 				cp = "capacity data changed";
1116 			break;
1117 		case SDEBUG_UA_MICROCODE_CHANGED:
1118 			mk_sense_buffer(scp, UNIT_ATTENTION,
1119 					TARGET_CHANGED_ASC,
1120 					MICROCODE_CHANGED_ASCQ);
1121 			if (sdebug_verbose)
1122 				cp = "microcode has been changed";
1123 			break;
1124 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1125 			mk_sense_buffer(scp, UNIT_ATTENTION,
1126 					TARGET_CHANGED_ASC,
1127 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1128 			if (sdebug_verbose)
1129 				cp = "microcode has been changed without reset";
1130 			break;
1131 		case SDEBUG_UA_LUNS_CHANGED:
1132 			/*
1133 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1134 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1135 			 * on the target, until a REPORT LUNS command is
1136 			 * received.  SPC-4 behavior is to report it only once.
1137 			 * NOTE:  sdebug_scsi_level does not use the same
1138 			 * values as struct scsi_device->scsi_level.
1139 			 */
1140 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1141 				clear_luns_changed_on_target(devip);
1142 			mk_sense_buffer(scp, UNIT_ATTENTION,
1143 					TARGET_CHANGED_ASC,
1144 					LUNS_CHANGED_ASCQ);
1145 			if (sdebug_verbose)
1146 				cp = "reported luns data has changed";
1147 			break;
1148 		default:
1149 			pr_warn("unexpected unit attention code=%d\n", k);
1150 			if (sdebug_verbose)
1151 				cp = "unknown";
1152 			break;
1153 		}
1154 		clear_bit(k, devip->uas_bm);
1155 		if (sdebug_verbose)
1156 			sdev_printk(KERN_INFO, scp->device,
1157 				   "%s reports: Unit attention: %s\n",
1158 				   my_name, cp);
1159 		return check_condition_result;
1160 	}
1161 	return 0;
1162 }
1163 
1164 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
fill_from_dev_buffer(struct scsi_cmnd * scp,unsigned char * arr,int arr_len)1165 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1166 				int arr_len)
1167 {
1168 	int act_len;
1169 	struct scsi_data_buffer *sdb = &scp->sdb;
1170 
1171 	if (!sdb->length)
1172 		return 0;
1173 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1174 		return DID_ERROR << 16;
1175 
1176 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1177 				      arr, arr_len);
1178 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1179 
1180 	return 0;
1181 }
1182 
1183 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1184  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1185  * calls, not required to write in ascending offset order. Assumes resid
1186  * set to scsi_bufflen() prior to any calls.
1187  */
p_fill_from_dev_buffer(struct scsi_cmnd * scp,const void * arr,int arr_len,unsigned int off_dst)1188 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1189 				  int arr_len, unsigned int off_dst)
1190 {
1191 	unsigned int act_len, n;
1192 	struct scsi_data_buffer *sdb = &scp->sdb;
1193 	off_t skip = off_dst;
1194 
1195 	if (sdb->length <= off_dst)
1196 		return 0;
1197 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1198 		return DID_ERROR << 16;
1199 
1200 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1201 				       arr, arr_len, skip);
1202 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1203 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1204 		 scsi_get_resid(scp));
1205 	n = scsi_bufflen(scp) - (off_dst + act_len);
1206 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1207 	return 0;
1208 }
1209 
1210 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1211  * 'arr' or -1 if error.
1212  */
fetch_to_dev_buffer(struct scsi_cmnd * scp,unsigned char * arr,int arr_len)1213 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1214 			       int arr_len)
1215 {
1216 	if (!scsi_bufflen(scp))
1217 		return 0;
1218 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1219 		return -1;
1220 
1221 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1222 }
1223 
1224 
1225 static char sdebug_inq_vendor_id[9] = "Linux   ";
1226 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1227 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1228 /* Use some locally assigned NAAs for SAS addresses. */
1229 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1230 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1231 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1232 
1233 /* Device identification VPD page. Returns number of bytes placed in arr */
inquiry_vpd_83(unsigned char * arr,int port_group_id,int target_dev_id,int dev_id_num,const char * dev_id_str,int dev_id_str_len,const uuid_t * lu_name)1234 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1235 			  int target_dev_id, int dev_id_num,
1236 			  const char *dev_id_str, int dev_id_str_len,
1237 			  const uuid_t *lu_name)
1238 {
1239 	int num, port_a;
1240 	char b[32];
1241 
1242 	port_a = target_dev_id + 1;
1243 	/* T10 vendor identifier field format (faked) */
1244 	arr[0] = 0x2;	/* ASCII */
1245 	arr[1] = 0x1;
1246 	arr[2] = 0x0;
1247 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1248 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1249 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1250 	num = 8 + 16 + dev_id_str_len;
1251 	arr[3] = num;
1252 	num += 4;
1253 	if (dev_id_num >= 0) {
1254 		if (sdebug_uuid_ctl) {
1255 			/* Locally assigned UUID */
1256 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1257 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1258 			arr[num++] = 0x0;
1259 			arr[num++] = 0x12;
1260 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1261 			arr[num++] = 0x0;
1262 			memcpy(arr + num, lu_name, 16);
1263 			num += 16;
1264 		} else {
1265 			/* NAA-3, Logical unit identifier (binary) */
1266 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1267 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1268 			arr[num++] = 0x0;
1269 			arr[num++] = 0x8;
1270 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1271 			num += 8;
1272 		}
1273 		/* Target relative port number */
1274 		arr[num++] = 0x61;	/* proto=sas, binary */
1275 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1276 		arr[num++] = 0x0;	/* reserved */
1277 		arr[num++] = 0x4;	/* length */
1278 		arr[num++] = 0x0;	/* reserved */
1279 		arr[num++] = 0x0;	/* reserved */
1280 		arr[num++] = 0x0;
1281 		arr[num++] = 0x1;	/* relative port A */
1282 	}
1283 	/* NAA-3, Target port identifier */
1284 	arr[num++] = 0x61;	/* proto=sas, binary */
1285 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1286 	arr[num++] = 0x0;
1287 	arr[num++] = 0x8;
1288 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1289 	num += 8;
1290 	/* NAA-3, Target port group identifier */
1291 	arr[num++] = 0x61;	/* proto=sas, binary */
1292 	arr[num++] = 0x95;	/* piv=1, target port group id */
1293 	arr[num++] = 0x0;
1294 	arr[num++] = 0x4;
1295 	arr[num++] = 0;
1296 	arr[num++] = 0;
1297 	put_unaligned_be16(port_group_id, arr + num);
1298 	num += 2;
1299 	/* NAA-3, Target device identifier */
1300 	arr[num++] = 0x61;	/* proto=sas, binary */
1301 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1302 	arr[num++] = 0x0;
1303 	arr[num++] = 0x8;
1304 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1305 	num += 8;
1306 	/* SCSI name string: Target device identifier */
1307 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1308 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1309 	arr[num++] = 0x0;
1310 	arr[num++] = 24;
1311 	memcpy(arr + num, "naa.32222220", 12);
1312 	num += 12;
1313 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1314 	memcpy(arr + num, b, 8);
1315 	num += 8;
1316 	memset(arr + num, 0, 4);
1317 	num += 4;
1318 	return num;
1319 }
1320 
1321 static unsigned char vpd84_data[] = {
1322 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1323     0x22,0x22,0x22,0x0,0xbb,0x1,
1324     0x22,0x22,0x22,0x0,0xbb,0x2,
1325 };
1326 
1327 /*  Software interface identification VPD page */
inquiry_vpd_84(unsigned char * arr)1328 static int inquiry_vpd_84(unsigned char *arr)
1329 {
1330 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1331 	return sizeof(vpd84_data);
1332 }
1333 
1334 /* Management network addresses VPD page */
inquiry_vpd_85(unsigned char * arr)1335 static int inquiry_vpd_85(unsigned char *arr)
1336 {
1337 	int num = 0;
1338 	const char *na1 = "https://www.kernel.org/config";
1339 	const char *na2 = "http://www.kernel.org/log";
1340 	int plen, olen;
1341 
1342 	arr[num++] = 0x1;	/* lu, storage config */
1343 	arr[num++] = 0x0;	/* reserved */
1344 	arr[num++] = 0x0;
1345 	olen = strlen(na1);
1346 	plen = olen + 1;
1347 	if (plen % 4)
1348 		plen = ((plen / 4) + 1) * 4;
1349 	arr[num++] = plen;	/* length, null termianted, padded */
1350 	memcpy(arr + num, na1, olen);
1351 	memset(arr + num + olen, 0, plen - olen);
1352 	num += plen;
1353 
1354 	arr[num++] = 0x4;	/* lu, logging */
1355 	arr[num++] = 0x0;	/* reserved */
1356 	arr[num++] = 0x0;
1357 	olen = strlen(na2);
1358 	plen = olen + 1;
1359 	if (plen % 4)
1360 		plen = ((plen / 4) + 1) * 4;
1361 	arr[num++] = plen;	/* length, null terminated, padded */
1362 	memcpy(arr + num, na2, olen);
1363 	memset(arr + num + olen, 0, plen - olen);
1364 	num += plen;
1365 
1366 	return num;
1367 }
1368 
1369 /* SCSI ports VPD page */
inquiry_vpd_88(unsigned char * arr,int target_dev_id)1370 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1371 {
1372 	int num = 0;
1373 	int port_a, port_b;
1374 
1375 	port_a = target_dev_id + 1;
1376 	port_b = port_a + 1;
1377 	arr[num++] = 0x0;	/* reserved */
1378 	arr[num++] = 0x0;	/* reserved */
1379 	arr[num++] = 0x0;
1380 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1381 	memset(arr + num, 0, 6);
1382 	num += 6;
1383 	arr[num++] = 0x0;
1384 	arr[num++] = 12;	/* length tp descriptor */
1385 	/* naa-5 target port identifier (A) */
1386 	arr[num++] = 0x61;	/* proto=sas, binary */
1387 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1388 	arr[num++] = 0x0;	/* reserved */
1389 	arr[num++] = 0x8;	/* length */
1390 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1391 	num += 8;
1392 	arr[num++] = 0x0;	/* reserved */
1393 	arr[num++] = 0x0;	/* reserved */
1394 	arr[num++] = 0x0;
1395 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1396 	memset(arr + num, 0, 6);
1397 	num += 6;
1398 	arr[num++] = 0x0;
1399 	arr[num++] = 12;	/* length tp descriptor */
1400 	/* naa-5 target port identifier (B) */
1401 	arr[num++] = 0x61;	/* proto=sas, binary */
1402 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1403 	arr[num++] = 0x0;	/* reserved */
1404 	arr[num++] = 0x8;	/* length */
1405 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1406 	num += 8;
1407 
1408 	return num;
1409 }
1410 
1411 
1412 static unsigned char vpd89_data[] = {
1413 /* from 4th byte */ 0,0,0,0,
1414 'l','i','n','u','x',' ',' ',' ',
1415 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1416 '1','2','3','4',
1417 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1418 0xec,0,0,0,
1419 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1420 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1421 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1422 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1423 0x53,0x41,
1424 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1425 0x20,0x20,
1426 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1427 0x10,0x80,
1428 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1429 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1430 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1431 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1432 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1433 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1434 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1439 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1440 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1441 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1444 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1445 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1446 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1447 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1448 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1449 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1450 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1451 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1452 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1453 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1454 };
1455 
1456 /* ATA Information VPD page */
inquiry_vpd_89(unsigned char * arr)1457 static int inquiry_vpd_89(unsigned char *arr)
1458 {
1459 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1460 	return sizeof(vpd89_data);
1461 }
1462 
1463 
1464 static unsigned char vpdb0_data[] = {
1465 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1466 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1467 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1468 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1469 };
1470 
1471 /* Block limits VPD page (SBC-3) */
inquiry_vpd_b0(unsigned char * arr)1472 static int inquiry_vpd_b0(unsigned char *arr)
1473 {
1474 	unsigned int gran;
1475 
1476 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1477 
1478 	/* Optimal transfer length granularity */
1479 	if (sdebug_opt_xferlen_exp != 0 &&
1480 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1481 		gran = 1 << sdebug_opt_xferlen_exp;
1482 	else
1483 		gran = 1 << sdebug_physblk_exp;
1484 	put_unaligned_be16(gran, arr + 2);
1485 
1486 	/* Maximum Transfer Length */
1487 	if (sdebug_store_sectors > 0x400)
1488 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1489 
1490 	/* Optimal Transfer Length */
1491 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1492 
1493 	if (sdebug_lbpu) {
1494 		/* Maximum Unmap LBA Count */
1495 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1496 
1497 		/* Maximum Unmap Block Descriptor Count */
1498 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1499 	}
1500 
1501 	/* Unmap Granularity Alignment */
1502 	if (sdebug_unmap_alignment) {
1503 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1504 		arr[28] |= 0x80; /* UGAVALID */
1505 	}
1506 
1507 	/* Optimal Unmap Granularity */
1508 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1509 
1510 	/* Maximum WRITE SAME Length */
1511 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1512 
1513 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1514 
1515 	return sizeof(vpdb0_data);
1516 }
1517 
1518 /* Block device characteristics VPD page (SBC-3) */
inquiry_vpd_b1(struct sdebug_dev_info * devip,unsigned char * arr)1519 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1520 {
1521 	memset(arr, 0, 0x3c);
1522 	arr[0] = 0;
1523 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1524 	arr[2] = 0;
1525 	arr[3] = 5;	/* less than 1.8" */
1526 	if (devip->zmodel == BLK_ZONED_HA)
1527 		arr[4] = 1 << 4;	/* zoned field = 01b */
1528 
1529 	return 0x3c;
1530 }
1531 
1532 /* Logical block provisioning VPD page (SBC-4) */
inquiry_vpd_b2(unsigned char * arr)1533 static int inquiry_vpd_b2(unsigned char *arr)
1534 {
1535 	memset(arr, 0, 0x4);
1536 	arr[0] = 0;			/* threshold exponent */
1537 	if (sdebug_lbpu)
1538 		arr[1] = 1 << 7;
1539 	if (sdebug_lbpws)
1540 		arr[1] |= 1 << 6;
1541 	if (sdebug_lbpws10)
1542 		arr[1] |= 1 << 5;
1543 	if (sdebug_lbprz && scsi_debug_lbp())
1544 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1545 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1546 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1547 	/* threshold_percentage=0 */
1548 	return 0x4;
1549 }
1550 
1551 /* Zoned block device characteristics VPD page (ZBC mandatory) */
inquiry_vpd_b6(struct sdebug_dev_info * devip,unsigned char * arr)1552 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1553 {
1554 	memset(arr, 0, 0x3c);
1555 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1556 	/*
1557 	 * Set Optimal number of open sequential write preferred zones and
1558 	 * Optimal number of non-sequentially written sequential write
1559 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1560 	 * fields set to zero, apart from Max. number of open swrz_s field.
1561 	 */
1562 	put_unaligned_be32(0xffffffff, &arr[4]);
1563 	put_unaligned_be32(0xffffffff, &arr[8]);
1564 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1565 		put_unaligned_be32(devip->max_open, &arr[12]);
1566 	else
1567 		put_unaligned_be32(0xffffffff, &arr[12]);
1568 	if (devip->zcap < devip->zsize) {
1569 		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1570 		put_unaligned_be64(devip->zsize, &arr[20]);
1571 	} else {
1572 		arr[19] = 0;
1573 	}
1574 	return 0x3c;
1575 }
1576 
1577 #define SDEBUG_LONG_INQ_SZ 96
1578 #define SDEBUG_MAX_INQ_ARR_SZ 584
1579 
resp_inquiry(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)1580 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1581 {
1582 	unsigned char pq_pdt;
1583 	unsigned char *arr;
1584 	unsigned char *cmd = scp->cmnd;
1585 	u32 alloc_len, n;
1586 	int ret;
1587 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1588 
1589 	alloc_len = get_unaligned_be16(cmd + 3);
1590 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1591 	if (! arr)
1592 		return DID_REQUEUE << 16;
1593 	is_disk = (sdebug_ptype == TYPE_DISK);
1594 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1595 	is_disk_zbc = (is_disk || is_zbc);
1596 	have_wlun = scsi_is_wlun(scp->device->lun);
1597 	if (have_wlun)
1598 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1599 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1600 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1601 	else
1602 		pq_pdt = (sdebug_ptype & 0x1f);
1603 	arr[0] = pq_pdt;
1604 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1605 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1606 		kfree(arr);
1607 		return check_condition_result;
1608 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1609 		int lu_id_num, port_group_id, target_dev_id;
1610 		u32 len;
1611 		char lu_id_str[6];
1612 		int host_no = devip->sdbg_host->shost->host_no;
1613 
1614 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1615 		    (devip->channel & 0x7f);
1616 		if (sdebug_vpd_use_hostno == 0)
1617 			host_no = 0;
1618 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1619 			    (devip->target * 1000) + devip->lun);
1620 		target_dev_id = ((host_no + 1) * 2000) +
1621 				 (devip->target * 1000) - 3;
1622 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1623 		if (0 == cmd[2]) { /* supported vital product data pages */
1624 			arr[1] = cmd[2];	/*sanity */
1625 			n = 4;
1626 			arr[n++] = 0x0;   /* this page */
1627 			arr[n++] = 0x80;  /* unit serial number */
1628 			arr[n++] = 0x83;  /* device identification */
1629 			arr[n++] = 0x84;  /* software interface ident. */
1630 			arr[n++] = 0x85;  /* management network addresses */
1631 			arr[n++] = 0x86;  /* extended inquiry */
1632 			arr[n++] = 0x87;  /* mode page policy */
1633 			arr[n++] = 0x88;  /* SCSI ports */
1634 			if (is_disk_zbc) {	  /* SBC or ZBC */
1635 				arr[n++] = 0x89;  /* ATA information */
1636 				arr[n++] = 0xb0;  /* Block limits */
1637 				arr[n++] = 0xb1;  /* Block characteristics */
1638 				if (is_disk)
1639 					arr[n++] = 0xb2;  /* LB Provisioning */
1640 				if (is_zbc)
1641 					arr[n++] = 0xb6;  /* ZB dev. char. */
1642 			}
1643 			arr[3] = n - 4;	  /* number of supported VPD pages */
1644 		} else if (0x80 == cmd[2]) { /* unit serial number */
1645 			arr[1] = cmd[2];	/*sanity */
1646 			arr[3] = len;
1647 			memcpy(&arr[4], lu_id_str, len);
1648 		} else if (0x83 == cmd[2]) { /* device identification */
1649 			arr[1] = cmd[2];	/*sanity */
1650 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1651 						target_dev_id, lu_id_num,
1652 						lu_id_str, len,
1653 						&devip->lu_name);
1654 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1655 			arr[1] = cmd[2];	/*sanity */
1656 			arr[3] = inquiry_vpd_84(&arr[4]);
1657 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1658 			arr[1] = cmd[2];	/*sanity */
1659 			arr[3] = inquiry_vpd_85(&arr[4]);
1660 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1661 			arr[1] = cmd[2];	/*sanity */
1662 			arr[3] = 0x3c;	/* number of following entries */
1663 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1664 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1665 			else if (have_dif_prot)
1666 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1667 			else
1668 				arr[4] = 0x0;   /* no protection stuff */
1669 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1670 		} else if (0x87 == cmd[2]) { /* mode page policy */
1671 			arr[1] = cmd[2];	/*sanity */
1672 			arr[3] = 0x8;	/* number of following entries */
1673 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1674 			arr[6] = 0x80;	/* mlus, shared */
1675 			arr[8] = 0x18;	 /* protocol specific lu */
1676 			arr[10] = 0x82;	 /* mlus, per initiator port */
1677 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1678 			arr[1] = cmd[2];	/*sanity */
1679 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1680 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1681 			arr[1] = cmd[2];        /*sanity */
1682 			n = inquiry_vpd_89(&arr[4]);
1683 			put_unaligned_be16(n, arr + 2);
1684 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1685 			arr[1] = cmd[2];        /*sanity */
1686 			arr[3] = inquiry_vpd_b0(&arr[4]);
1687 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1688 			arr[1] = cmd[2];        /*sanity */
1689 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1690 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1691 			arr[1] = cmd[2];        /*sanity */
1692 			arr[3] = inquiry_vpd_b2(&arr[4]);
1693 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1694 			arr[1] = cmd[2];        /*sanity */
1695 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1696 		} else {
1697 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1698 			kfree(arr);
1699 			return check_condition_result;
1700 		}
1701 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1702 		ret = fill_from_dev_buffer(scp, arr,
1703 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1704 		kfree(arr);
1705 		return ret;
1706 	}
1707 	/* drops through here for a standard inquiry */
1708 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1709 	arr[2] = sdebug_scsi_level;
1710 	arr[3] = 2;    /* response_data_format==2 */
1711 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1712 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1713 	if (sdebug_vpd_use_hostno == 0)
1714 		arr[5] |= 0x10; /* claim: implicit TPGS */
1715 	arr[6] = 0x10; /* claim: MultiP */
1716 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1717 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1718 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1719 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1720 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1721 	/* Use Vendor Specific area to place driver date in ASCII hex */
1722 	memcpy(&arr[36], sdebug_version_date, 8);
1723 	/* version descriptors (2 bytes each) follow */
1724 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1725 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1726 	n = 62;
1727 	if (is_disk) {		/* SBC-4 no version claimed */
1728 		put_unaligned_be16(0x600, arr + n);
1729 		n += 2;
1730 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1731 		put_unaligned_be16(0x525, arr + n);
1732 		n += 2;
1733 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1734 		put_unaligned_be16(0x624, arr + n);
1735 		n += 2;
1736 	}
1737 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1738 	ret = fill_from_dev_buffer(scp, arr,
1739 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1740 	kfree(arr);
1741 	return ret;
1742 }
1743 
1744 /* See resp_iec_m_pg() for how this data is manipulated */
1745 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1746 				   0, 0, 0x0, 0x0};
1747 
resp_requests(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)1748 static int resp_requests(struct scsi_cmnd *scp,
1749 			 struct sdebug_dev_info *devip)
1750 {
1751 	unsigned char *cmd = scp->cmnd;
1752 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1753 	bool dsense = !!(cmd[1] & 1);
1754 	u32 alloc_len = cmd[4];
1755 	u32 len = 18;
1756 	int stopped_state = atomic_read(&devip->stopped);
1757 
1758 	memset(arr, 0, sizeof(arr));
1759 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1760 		if (dsense) {
1761 			arr[0] = 0x72;
1762 			arr[1] = NOT_READY;
1763 			arr[2] = LOGICAL_UNIT_NOT_READY;
1764 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1765 			len = 8;
1766 		} else {
1767 			arr[0] = 0x70;
1768 			arr[2] = NOT_READY;		/* NO_SENSE in sense_key */
1769 			arr[7] = 0xa;			/* 18 byte sense buffer */
1770 			arr[12] = LOGICAL_UNIT_NOT_READY;
1771 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1772 		}
1773 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1774 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1775 		if (dsense) {
1776 			arr[0] = 0x72;
1777 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1778 			arr[2] = THRESHOLD_EXCEEDED;
1779 			arr[3] = 0xff;		/* Failure prediction(false) */
1780 			len = 8;
1781 		} else {
1782 			arr[0] = 0x70;
1783 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1784 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1785 			arr[12] = THRESHOLD_EXCEEDED;
1786 			arr[13] = 0xff;		/* Failure prediction(false) */
1787 		}
1788 	} else {	/* nothing to report */
1789 		if (dsense) {
1790 			len = 8;
1791 			memset(arr, 0, len);
1792 			arr[0] = 0x72;
1793 		} else {
1794 			memset(arr, 0, len);
1795 			arr[0] = 0x70;
1796 			arr[7] = 0xa;
1797 		}
1798 	}
1799 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1800 }
1801 
resp_start_stop(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)1802 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1803 {
1804 	unsigned char *cmd = scp->cmnd;
1805 	int power_cond, want_stop, stopped_state;
1806 	bool changing;
1807 
1808 	power_cond = (cmd[4] & 0xf0) >> 4;
1809 	if (power_cond) {
1810 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1811 		return check_condition_result;
1812 	}
1813 	want_stop = !(cmd[4] & 1);
1814 	stopped_state = atomic_read(&devip->stopped);
1815 	if (stopped_state == 2) {
1816 		ktime_t now_ts = ktime_get_boottime();
1817 
1818 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1819 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1820 
1821 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1822 				/* tur_ms_to_ready timer extinguished */
1823 				atomic_set(&devip->stopped, 0);
1824 				stopped_state = 0;
1825 			}
1826 		}
1827 		if (stopped_state == 2) {
1828 			if (want_stop) {
1829 				stopped_state = 1;	/* dummy up success */
1830 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1831 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1832 				return check_condition_result;
1833 			}
1834 		}
1835 	}
1836 	changing = (stopped_state != want_stop);
1837 	if (changing)
1838 		atomic_xchg(&devip->stopped, want_stop);
1839 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1840 		return SDEG_RES_IMMED_MASK;
1841 	else
1842 		return 0;
1843 }
1844 
get_sdebug_capacity(void)1845 static sector_t get_sdebug_capacity(void)
1846 {
1847 	static const unsigned int gibibyte = 1073741824;
1848 
1849 	if (sdebug_virtual_gb > 0)
1850 		return (sector_t)sdebug_virtual_gb *
1851 			(gibibyte / sdebug_sector_size);
1852 	else
1853 		return sdebug_store_sectors;
1854 }
1855 
1856 #define SDEBUG_READCAP_ARR_SZ 8
resp_readcap(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)1857 static int resp_readcap(struct scsi_cmnd *scp,
1858 			struct sdebug_dev_info *devip)
1859 {
1860 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1861 	unsigned int capac;
1862 
1863 	/* following just in case virtual_gb changed */
1864 	sdebug_capacity = get_sdebug_capacity();
1865 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1866 	if (sdebug_capacity < 0xffffffff) {
1867 		capac = (unsigned int)sdebug_capacity - 1;
1868 		put_unaligned_be32(capac, arr + 0);
1869 	} else
1870 		put_unaligned_be32(0xffffffff, arr + 0);
1871 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1872 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1873 }
1874 
1875 #define SDEBUG_READCAP16_ARR_SZ 32
resp_readcap16(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)1876 static int resp_readcap16(struct scsi_cmnd *scp,
1877 			  struct sdebug_dev_info *devip)
1878 {
1879 	unsigned char *cmd = scp->cmnd;
1880 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1881 	u32 alloc_len;
1882 
1883 	alloc_len = get_unaligned_be32(cmd + 10);
1884 	/* following just in case virtual_gb changed */
1885 	sdebug_capacity = get_sdebug_capacity();
1886 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1887 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1888 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1889 	arr[13] = sdebug_physblk_exp & 0xf;
1890 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1891 
1892 	if (scsi_debug_lbp()) {
1893 		arr[14] |= 0x80; /* LBPME */
1894 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1895 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1896 		 * in the wider field maps to 0 in this field.
1897 		 */
1898 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1899 			arr[14] |= 0x40;
1900 	}
1901 
1902 	/*
1903 	 * Since the scsi_debug READ CAPACITY implementation always reports the
1904 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
1905 	 */
1906 	if (devip->zmodel == BLK_ZONED_HM)
1907 		arr[12] |= 1 << 4;
1908 
1909 	arr[15] = sdebug_lowest_aligned & 0xff;
1910 
1911 	if (have_dif_prot) {
1912 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1913 		arr[12] |= 1; /* PROT_EN */
1914 	}
1915 
1916 	return fill_from_dev_buffer(scp, arr,
1917 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1918 }
1919 
1920 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1921 
resp_report_tgtpgs(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)1922 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1923 			      struct sdebug_dev_info *devip)
1924 {
1925 	unsigned char *cmd = scp->cmnd;
1926 	unsigned char *arr;
1927 	int host_no = devip->sdbg_host->shost->host_no;
1928 	int port_group_a, port_group_b, port_a, port_b;
1929 	u32 alen, n, rlen;
1930 	int ret;
1931 
1932 	alen = get_unaligned_be32(cmd + 6);
1933 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1934 	if (! arr)
1935 		return DID_REQUEUE << 16;
1936 	/*
1937 	 * EVPD page 0x88 states we have two ports, one
1938 	 * real and a fake port with no device connected.
1939 	 * So we create two port groups with one port each
1940 	 * and set the group with port B to unavailable.
1941 	 */
1942 	port_a = 0x1; /* relative port A */
1943 	port_b = 0x2; /* relative port B */
1944 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1945 			(devip->channel & 0x7f);
1946 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1947 			(devip->channel & 0x7f) + 0x80;
1948 
1949 	/*
1950 	 * The asymmetric access state is cycled according to the host_id.
1951 	 */
1952 	n = 4;
1953 	if (sdebug_vpd_use_hostno == 0) {
1954 		arr[n++] = host_no % 3; /* Asymm access state */
1955 		arr[n++] = 0x0F; /* claim: all states are supported */
1956 	} else {
1957 		arr[n++] = 0x0; /* Active/Optimized path */
1958 		arr[n++] = 0x01; /* only support active/optimized paths */
1959 	}
1960 	put_unaligned_be16(port_group_a, arr + n);
1961 	n += 2;
1962 	arr[n++] = 0;    /* Reserved */
1963 	arr[n++] = 0;    /* Status code */
1964 	arr[n++] = 0;    /* Vendor unique */
1965 	arr[n++] = 0x1;  /* One port per group */
1966 	arr[n++] = 0;    /* Reserved */
1967 	arr[n++] = 0;    /* Reserved */
1968 	put_unaligned_be16(port_a, arr + n);
1969 	n += 2;
1970 	arr[n++] = 3;    /* Port unavailable */
1971 	arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1972 	put_unaligned_be16(port_group_b, arr + n);
1973 	n += 2;
1974 	arr[n++] = 0;    /* Reserved */
1975 	arr[n++] = 0;    /* Status code */
1976 	arr[n++] = 0;    /* Vendor unique */
1977 	arr[n++] = 0x1;  /* One port per group */
1978 	arr[n++] = 0;    /* Reserved */
1979 	arr[n++] = 0;    /* Reserved */
1980 	put_unaligned_be16(port_b, arr + n);
1981 	n += 2;
1982 
1983 	rlen = n - 4;
1984 	put_unaligned_be32(rlen, arr + 0);
1985 
1986 	/*
1987 	 * Return the smallest value of either
1988 	 * - The allocated length
1989 	 * - The constructed command length
1990 	 * - The maximum array size
1991 	 */
1992 	rlen = min(alen, n);
1993 	ret = fill_from_dev_buffer(scp, arr,
1994 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1995 	kfree(arr);
1996 	return ret;
1997 }
1998 
resp_rsup_opcodes(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)1999 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2000 			     struct sdebug_dev_info *devip)
2001 {
2002 	bool rctd;
2003 	u8 reporting_opts, req_opcode, sdeb_i, supp;
2004 	u16 req_sa, u;
2005 	u32 alloc_len, a_len;
2006 	int k, offset, len, errsts, count, bump, na;
2007 	const struct opcode_info_t *oip;
2008 	const struct opcode_info_t *r_oip;
2009 	u8 *arr;
2010 	u8 *cmd = scp->cmnd;
2011 
2012 	rctd = !!(cmd[2] & 0x80);
2013 	reporting_opts = cmd[2] & 0x7;
2014 	req_opcode = cmd[3];
2015 	req_sa = get_unaligned_be16(cmd + 4);
2016 	alloc_len = get_unaligned_be32(cmd + 6);
2017 	if (alloc_len < 4 || alloc_len > 0xffff) {
2018 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2019 		return check_condition_result;
2020 	}
2021 	if (alloc_len > 8192)
2022 		a_len = 8192;
2023 	else
2024 		a_len = alloc_len;
2025 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2026 	if (NULL == arr) {
2027 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2028 				INSUFF_RES_ASCQ);
2029 		return check_condition_result;
2030 	}
2031 	switch (reporting_opts) {
2032 	case 0:	/* all commands */
2033 		/* count number of commands */
2034 		for (count = 0, oip = opcode_info_arr;
2035 		     oip->num_attached != 0xff; ++oip) {
2036 			if (F_INV_OP & oip->flags)
2037 				continue;
2038 			count += (oip->num_attached + 1);
2039 		}
2040 		bump = rctd ? 20 : 8;
2041 		put_unaligned_be32(count * bump, arr);
2042 		for (offset = 4, oip = opcode_info_arr;
2043 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2044 			if (F_INV_OP & oip->flags)
2045 				continue;
2046 			na = oip->num_attached;
2047 			arr[offset] = oip->opcode;
2048 			put_unaligned_be16(oip->sa, arr + offset + 2);
2049 			if (rctd)
2050 				arr[offset + 5] |= 0x2;
2051 			if (FF_SA & oip->flags)
2052 				arr[offset + 5] |= 0x1;
2053 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2054 			if (rctd)
2055 				put_unaligned_be16(0xa, arr + offset + 8);
2056 			r_oip = oip;
2057 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2058 				if (F_INV_OP & oip->flags)
2059 					continue;
2060 				offset += bump;
2061 				arr[offset] = oip->opcode;
2062 				put_unaligned_be16(oip->sa, arr + offset + 2);
2063 				if (rctd)
2064 					arr[offset + 5] |= 0x2;
2065 				if (FF_SA & oip->flags)
2066 					arr[offset + 5] |= 0x1;
2067 				put_unaligned_be16(oip->len_mask[0],
2068 						   arr + offset + 6);
2069 				if (rctd)
2070 					put_unaligned_be16(0xa,
2071 							   arr + offset + 8);
2072 			}
2073 			oip = r_oip;
2074 			offset += bump;
2075 		}
2076 		break;
2077 	case 1:	/* one command: opcode only */
2078 	case 2:	/* one command: opcode plus service action */
2079 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2080 		sdeb_i = opcode_ind_arr[req_opcode];
2081 		oip = &opcode_info_arr[sdeb_i];
2082 		if (F_INV_OP & oip->flags) {
2083 			supp = 1;
2084 			offset = 4;
2085 		} else {
2086 			if (1 == reporting_opts) {
2087 				if (FF_SA & oip->flags) {
2088 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2089 							     2, 2);
2090 					kfree(arr);
2091 					return check_condition_result;
2092 				}
2093 				req_sa = 0;
2094 			} else if (2 == reporting_opts &&
2095 				   0 == (FF_SA & oip->flags)) {
2096 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2097 				kfree(arr);	/* point at requested sa */
2098 				return check_condition_result;
2099 			}
2100 			if (0 == (FF_SA & oip->flags) &&
2101 			    req_opcode == oip->opcode)
2102 				supp = 3;
2103 			else if (0 == (FF_SA & oip->flags)) {
2104 				na = oip->num_attached;
2105 				for (k = 0, oip = oip->arrp; k < na;
2106 				     ++k, ++oip) {
2107 					if (req_opcode == oip->opcode)
2108 						break;
2109 				}
2110 				supp = (k >= na) ? 1 : 3;
2111 			} else if (req_sa != oip->sa) {
2112 				na = oip->num_attached;
2113 				for (k = 0, oip = oip->arrp; k < na;
2114 				     ++k, ++oip) {
2115 					if (req_sa == oip->sa)
2116 						break;
2117 				}
2118 				supp = (k >= na) ? 1 : 3;
2119 			} else
2120 				supp = 3;
2121 			if (3 == supp) {
2122 				u = oip->len_mask[0];
2123 				put_unaligned_be16(u, arr + 2);
2124 				arr[4] = oip->opcode;
2125 				for (k = 1; k < u; ++k)
2126 					arr[4 + k] = (k < 16) ?
2127 						 oip->len_mask[k] : 0xff;
2128 				offset = 4 + u;
2129 			} else
2130 				offset = 4;
2131 		}
2132 		arr[1] = (rctd ? 0x80 : 0) | supp;
2133 		if (rctd) {
2134 			put_unaligned_be16(0xa, arr + offset);
2135 			offset += 12;
2136 		}
2137 		break;
2138 	default:
2139 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2140 		kfree(arr);
2141 		return check_condition_result;
2142 	}
2143 	offset = (offset < a_len) ? offset : a_len;
2144 	len = (offset < alloc_len) ? offset : alloc_len;
2145 	errsts = fill_from_dev_buffer(scp, arr, len);
2146 	kfree(arr);
2147 	return errsts;
2148 }
2149 
resp_rsup_tmfs(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)2150 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2151 			  struct sdebug_dev_info *devip)
2152 {
2153 	bool repd;
2154 	u32 alloc_len, len;
2155 	u8 arr[16];
2156 	u8 *cmd = scp->cmnd;
2157 
2158 	memset(arr, 0, sizeof(arr));
2159 	repd = !!(cmd[2] & 0x80);
2160 	alloc_len = get_unaligned_be32(cmd + 6);
2161 	if (alloc_len < 4) {
2162 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2163 		return check_condition_result;
2164 	}
2165 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2166 	arr[1] = 0x1;		/* ITNRS */
2167 	if (repd) {
2168 		arr[3] = 0xc;
2169 		len = 16;
2170 	} else
2171 		len = 4;
2172 
2173 	len = (len < alloc_len) ? len : alloc_len;
2174 	return fill_from_dev_buffer(scp, arr, len);
2175 }
2176 
2177 /* <<Following mode page info copied from ST318451LW>> */
2178 
resp_err_recov_pg(unsigned char * p,int pcontrol,int target)2179 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2180 {	/* Read-Write Error Recovery page for mode_sense */
2181 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2182 					5, 0, 0xff, 0xff};
2183 
2184 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2185 	if (1 == pcontrol)
2186 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2187 	return sizeof(err_recov_pg);
2188 }
2189 
resp_disconnect_pg(unsigned char * p,int pcontrol,int target)2190 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2191 { 	/* Disconnect-Reconnect page for mode_sense */
2192 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2193 					 0, 0, 0, 0, 0, 0, 0, 0};
2194 
2195 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2196 	if (1 == pcontrol)
2197 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2198 	return sizeof(disconnect_pg);
2199 }
2200 
resp_format_pg(unsigned char * p,int pcontrol,int target)2201 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2202 {       /* Format device page for mode_sense */
2203 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2204 				     0, 0, 0, 0, 0, 0, 0, 0,
2205 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2206 
2207 	memcpy(p, format_pg, sizeof(format_pg));
2208 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2209 	put_unaligned_be16(sdebug_sector_size, p + 12);
2210 	if (sdebug_removable)
2211 		p[20] |= 0x20; /* should agree with INQUIRY */
2212 	if (1 == pcontrol)
2213 		memset(p + 2, 0, sizeof(format_pg) - 2);
2214 	return sizeof(format_pg);
2215 }
2216 
2217 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2218 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2219 				     0, 0, 0, 0};
2220 
resp_caching_pg(unsigned char * p,int pcontrol,int target)2221 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2222 { 	/* Caching page for mode_sense */
2223 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2224 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2225 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2226 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2227 
2228 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2229 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2230 	memcpy(p, caching_pg, sizeof(caching_pg));
2231 	if (1 == pcontrol)
2232 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2233 	else if (2 == pcontrol)
2234 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2235 	return sizeof(caching_pg);
2236 }
2237 
2238 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2239 				    0, 0, 0x2, 0x4b};
2240 
resp_ctrl_m_pg(unsigned char * p,int pcontrol,int target)2241 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2242 { 	/* Control mode page for mode_sense */
2243 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2244 					0, 0, 0, 0};
2245 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2246 				     0, 0, 0x2, 0x4b};
2247 
2248 	if (sdebug_dsense)
2249 		ctrl_m_pg[2] |= 0x4;
2250 	else
2251 		ctrl_m_pg[2] &= ~0x4;
2252 
2253 	if (sdebug_ato)
2254 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2255 
2256 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2257 	if (1 == pcontrol)
2258 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2259 	else if (2 == pcontrol)
2260 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2261 	return sizeof(ctrl_m_pg);
2262 }
2263 
2264 
resp_iec_m_pg(unsigned char * p,int pcontrol,int target)2265 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2266 {	/* Informational Exceptions control mode page for mode_sense */
2267 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2268 				       0, 0, 0x0, 0x0};
2269 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2270 				      0, 0, 0x0, 0x0};
2271 
2272 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2273 	if (1 == pcontrol)
2274 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2275 	else if (2 == pcontrol)
2276 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2277 	return sizeof(iec_m_pg);
2278 }
2279 
resp_sas_sf_m_pg(unsigned char * p,int pcontrol,int target)2280 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2281 {	/* SAS SSP mode page - short format for mode_sense */
2282 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2283 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2284 
2285 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2286 	if (1 == pcontrol)
2287 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2288 	return sizeof(sas_sf_m_pg);
2289 }
2290 
2291 
resp_sas_pcd_m_spg(unsigned char * p,int pcontrol,int target,int target_dev_id)2292 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2293 			      int target_dev_id)
2294 {	/* SAS phy control and discover mode page for mode_sense */
2295 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2296 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2297 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2298 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2299 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2300 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2301 		    0, 0, 0, 0, 0, 0, 0, 0,
2302 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2303 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2304 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2305 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2306 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2307 		    0, 0, 0, 0, 0, 0, 0, 0,
2308 		};
2309 	int port_a, port_b;
2310 
2311 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2312 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2313 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2314 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2315 	port_a = target_dev_id + 1;
2316 	port_b = port_a + 1;
2317 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2318 	put_unaligned_be32(port_a, p + 20);
2319 	put_unaligned_be32(port_b, p + 48 + 20);
2320 	if (1 == pcontrol)
2321 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2322 	return sizeof(sas_pcd_m_pg);
2323 }
2324 
resp_sas_sha_m_spg(unsigned char * p,int pcontrol)2325 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2326 {	/* SAS SSP shared protocol specific port mode subpage */
2327 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2328 		    0, 0, 0, 0, 0, 0, 0, 0,
2329 		};
2330 
2331 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2332 	if (1 == pcontrol)
2333 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2334 	return sizeof(sas_sha_m_pg);
2335 }
2336 
2337 #define SDEBUG_MAX_MSENSE_SZ 256
2338 
resp_mode_sense(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)2339 static int resp_mode_sense(struct scsi_cmnd *scp,
2340 			   struct sdebug_dev_info *devip)
2341 {
2342 	int pcontrol, pcode, subpcode, bd_len;
2343 	unsigned char dev_spec;
2344 	u32 alloc_len, offset, len;
2345 	int target_dev_id;
2346 	int target = scp->device->id;
2347 	unsigned char *ap;
2348 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2349 	unsigned char *cmd = scp->cmnd;
2350 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2351 
2352 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2353 	pcontrol = (cmd[2] & 0xc0) >> 6;
2354 	pcode = cmd[2] & 0x3f;
2355 	subpcode = cmd[3];
2356 	msense_6 = (MODE_SENSE == cmd[0]);
2357 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2358 	is_disk = (sdebug_ptype == TYPE_DISK);
2359 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2360 	if ((is_disk || is_zbc) && !dbd)
2361 		bd_len = llbaa ? 16 : 8;
2362 	else
2363 		bd_len = 0;
2364 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2365 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2366 	if (0x3 == pcontrol) {  /* Saving values not supported */
2367 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2368 		return check_condition_result;
2369 	}
2370 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2371 			(devip->target * 1000) - 3;
2372 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2373 	if (is_disk || is_zbc) {
2374 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2375 		if (sdebug_wp)
2376 			dev_spec |= 0x80;
2377 	} else
2378 		dev_spec = 0x0;
2379 	if (msense_6) {
2380 		arr[2] = dev_spec;
2381 		arr[3] = bd_len;
2382 		offset = 4;
2383 	} else {
2384 		arr[3] = dev_spec;
2385 		if (16 == bd_len)
2386 			arr[4] = 0x1;	/* set LONGLBA bit */
2387 		arr[7] = bd_len;	/* assume 255 or less */
2388 		offset = 8;
2389 	}
2390 	ap = arr + offset;
2391 	if ((bd_len > 0) && (!sdebug_capacity))
2392 		sdebug_capacity = get_sdebug_capacity();
2393 
2394 	if (8 == bd_len) {
2395 		if (sdebug_capacity > 0xfffffffe)
2396 			put_unaligned_be32(0xffffffff, ap + 0);
2397 		else
2398 			put_unaligned_be32(sdebug_capacity, ap + 0);
2399 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2400 		offset += bd_len;
2401 		ap = arr + offset;
2402 	} else if (16 == bd_len) {
2403 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2404 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2405 		offset += bd_len;
2406 		ap = arr + offset;
2407 	}
2408 
2409 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2410 		/* TODO: Control Extension page */
2411 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2412 		return check_condition_result;
2413 	}
2414 	bad_pcode = false;
2415 
2416 	switch (pcode) {
2417 	case 0x1:	/* Read-Write error recovery page, direct access */
2418 		len = resp_err_recov_pg(ap, pcontrol, target);
2419 		offset += len;
2420 		break;
2421 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2422 		len = resp_disconnect_pg(ap, pcontrol, target);
2423 		offset += len;
2424 		break;
2425 	case 0x3:       /* Format device page, direct access */
2426 		if (is_disk) {
2427 			len = resp_format_pg(ap, pcontrol, target);
2428 			offset += len;
2429 		} else
2430 			bad_pcode = true;
2431 		break;
2432 	case 0x8:	/* Caching page, direct access */
2433 		if (is_disk || is_zbc) {
2434 			len = resp_caching_pg(ap, pcontrol, target);
2435 			offset += len;
2436 		} else
2437 			bad_pcode = true;
2438 		break;
2439 	case 0xa:	/* Control Mode page, all devices */
2440 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2441 		offset += len;
2442 		break;
2443 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2444 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2445 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2446 			return check_condition_result;
2447 		}
2448 		len = 0;
2449 		if ((0x0 == subpcode) || (0xff == subpcode))
2450 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2451 		if ((0x1 == subpcode) || (0xff == subpcode))
2452 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2453 						  target_dev_id);
2454 		if ((0x2 == subpcode) || (0xff == subpcode))
2455 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2456 		offset += len;
2457 		break;
2458 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2459 		len = resp_iec_m_pg(ap, pcontrol, target);
2460 		offset += len;
2461 		break;
2462 	case 0x3f:	/* Read all Mode pages */
2463 		if ((0 == subpcode) || (0xff == subpcode)) {
2464 			len = resp_err_recov_pg(ap, pcontrol, target);
2465 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2466 			if (is_disk) {
2467 				len += resp_format_pg(ap + len, pcontrol,
2468 						      target);
2469 				len += resp_caching_pg(ap + len, pcontrol,
2470 						       target);
2471 			} else if (is_zbc) {
2472 				len += resp_caching_pg(ap + len, pcontrol,
2473 						       target);
2474 			}
2475 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2476 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2477 			if (0xff == subpcode) {
2478 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2479 						  target, target_dev_id);
2480 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2481 			}
2482 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2483 			offset += len;
2484 		} else {
2485 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2486 			return check_condition_result;
2487 		}
2488 		break;
2489 	default:
2490 		bad_pcode = true;
2491 		break;
2492 	}
2493 	if (bad_pcode) {
2494 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2495 		return check_condition_result;
2496 	}
2497 	if (msense_6)
2498 		arr[0] = offset - 1;
2499 	else
2500 		put_unaligned_be16((offset - 2), arr + 0);
2501 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2502 }
2503 
2504 #define SDEBUG_MAX_MSELECT_SZ 512
2505 
resp_mode_select(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)2506 static int resp_mode_select(struct scsi_cmnd *scp,
2507 			    struct sdebug_dev_info *devip)
2508 {
2509 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2510 	int param_len, res, mpage;
2511 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2512 	unsigned char *cmd = scp->cmnd;
2513 	int mselect6 = (MODE_SELECT == cmd[0]);
2514 
2515 	memset(arr, 0, sizeof(arr));
2516 	pf = cmd[1] & 0x10;
2517 	sp = cmd[1] & 0x1;
2518 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2519 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2520 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2521 		return check_condition_result;
2522 	}
2523 	res = fetch_to_dev_buffer(scp, arr, param_len);
2524 	if (-1 == res)
2525 		return DID_ERROR << 16;
2526 	else if (sdebug_verbose && (res < param_len))
2527 		sdev_printk(KERN_INFO, scp->device,
2528 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2529 			    __func__, param_len, res);
2530 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2531 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2532 	off = bd_len + (mselect6 ? 4 : 8);
2533 	if (md_len > 2 || off >= res) {
2534 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2535 		return check_condition_result;
2536 	}
2537 	mpage = arr[off] & 0x3f;
2538 	ps = !!(arr[off] & 0x80);
2539 	if (ps) {
2540 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2541 		return check_condition_result;
2542 	}
2543 	spf = !!(arr[off] & 0x40);
2544 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2545 		       (arr[off + 1] + 2);
2546 	if ((pg_len + off) > param_len) {
2547 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2548 				PARAMETER_LIST_LENGTH_ERR, 0);
2549 		return check_condition_result;
2550 	}
2551 	switch (mpage) {
2552 	case 0x8:      /* Caching Mode page */
2553 		if (caching_pg[1] == arr[off + 1]) {
2554 			memcpy(caching_pg + 2, arr + off + 2,
2555 			       sizeof(caching_pg) - 2);
2556 			goto set_mode_changed_ua;
2557 		}
2558 		break;
2559 	case 0xa:      /* Control Mode page */
2560 		if (ctrl_m_pg[1] == arr[off + 1]) {
2561 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2562 			       sizeof(ctrl_m_pg) - 2);
2563 			if (ctrl_m_pg[4] & 0x8)
2564 				sdebug_wp = true;
2565 			else
2566 				sdebug_wp = false;
2567 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2568 			goto set_mode_changed_ua;
2569 		}
2570 		break;
2571 	case 0x1c:      /* Informational Exceptions Mode page */
2572 		if (iec_m_pg[1] == arr[off + 1]) {
2573 			memcpy(iec_m_pg + 2, arr + off + 2,
2574 			       sizeof(iec_m_pg) - 2);
2575 			goto set_mode_changed_ua;
2576 		}
2577 		break;
2578 	default:
2579 		break;
2580 	}
2581 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2582 	return check_condition_result;
2583 set_mode_changed_ua:
2584 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2585 	return 0;
2586 }
2587 
resp_temp_l_pg(unsigned char * arr)2588 static int resp_temp_l_pg(unsigned char *arr)
2589 {
2590 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2591 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2592 		};
2593 
2594 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2595 	return sizeof(temp_l_pg);
2596 }
2597 
resp_ie_l_pg(unsigned char * arr)2598 static int resp_ie_l_pg(unsigned char *arr)
2599 {
2600 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2601 		};
2602 
2603 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2604 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2605 		arr[4] = THRESHOLD_EXCEEDED;
2606 		arr[5] = 0xff;
2607 	}
2608 	return sizeof(ie_l_pg);
2609 }
2610 
resp_env_rep_l_spg(unsigned char * arr)2611 static int resp_env_rep_l_spg(unsigned char *arr)
2612 {
2613 	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2614 					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2615 					 0x1, 0x0, 0x23, 0x8,
2616 					 0x0, 55, 72, 35, 55, 45, 0, 0,
2617 		};
2618 
2619 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2620 	return sizeof(env_rep_l_spg);
2621 }
2622 
2623 #define SDEBUG_MAX_LSENSE_SZ 512
2624 
resp_log_sense(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)2625 static int resp_log_sense(struct scsi_cmnd *scp,
2626 			  struct sdebug_dev_info *devip)
2627 {
2628 	int ppc, sp, pcode, subpcode;
2629 	u32 alloc_len, len, n;
2630 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2631 	unsigned char *cmd = scp->cmnd;
2632 
2633 	memset(arr, 0, sizeof(arr));
2634 	ppc = cmd[1] & 0x2;
2635 	sp = cmd[1] & 0x1;
2636 	if (ppc || sp) {
2637 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2638 		return check_condition_result;
2639 	}
2640 	pcode = cmd[2] & 0x3f;
2641 	subpcode = cmd[3] & 0xff;
2642 	alloc_len = get_unaligned_be16(cmd + 7);
2643 	arr[0] = pcode;
2644 	if (0 == subpcode) {
2645 		switch (pcode) {
2646 		case 0x0:	/* Supported log pages log page */
2647 			n = 4;
2648 			arr[n++] = 0x0;		/* this page */
2649 			arr[n++] = 0xd;		/* Temperature */
2650 			arr[n++] = 0x2f;	/* Informational exceptions */
2651 			arr[3] = n - 4;
2652 			break;
2653 		case 0xd:	/* Temperature log page */
2654 			arr[3] = resp_temp_l_pg(arr + 4);
2655 			break;
2656 		case 0x2f:	/* Informational exceptions log page */
2657 			arr[3] = resp_ie_l_pg(arr + 4);
2658 			break;
2659 		default:
2660 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2661 			return check_condition_result;
2662 		}
2663 	} else if (0xff == subpcode) {
2664 		arr[0] |= 0x40;
2665 		arr[1] = subpcode;
2666 		switch (pcode) {
2667 		case 0x0:	/* Supported log pages and subpages log page */
2668 			n = 4;
2669 			arr[n++] = 0x0;
2670 			arr[n++] = 0x0;		/* 0,0 page */
2671 			arr[n++] = 0x0;
2672 			arr[n++] = 0xff;	/* this page */
2673 			arr[n++] = 0xd;
2674 			arr[n++] = 0x0;		/* Temperature */
2675 			arr[n++] = 0xd;
2676 			arr[n++] = 0x1;		/* Environment reporting */
2677 			arr[n++] = 0xd;
2678 			arr[n++] = 0xff;	/* all 0xd subpages */
2679 			arr[n++] = 0x2f;
2680 			arr[n++] = 0x0;	/* Informational exceptions */
2681 			arr[n++] = 0x2f;
2682 			arr[n++] = 0xff;	/* all 0x2f subpages */
2683 			arr[3] = n - 4;
2684 			break;
2685 		case 0xd:	/* Temperature subpages */
2686 			n = 4;
2687 			arr[n++] = 0xd;
2688 			arr[n++] = 0x0;		/* Temperature */
2689 			arr[n++] = 0xd;
2690 			arr[n++] = 0x1;		/* Environment reporting */
2691 			arr[n++] = 0xd;
2692 			arr[n++] = 0xff;	/* these subpages */
2693 			arr[3] = n - 4;
2694 			break;
2695 		case 0x2f:	/* Informational exceptions subpages */
2696 			n = 4;
2697 			arr[n++] = 0x2f;
2698 			arr[n++] = 0x0;		/* Informational exceptions */
2699 			arr[n++] = 0x2f;
2700 			arr[n++] = 0xff;	/* these subpages */
2701 			arr[3] = n - 4;
2702 			break;
2703 		default:
2704 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2705 			return check_condition_result;
2706 		}
2707 	} else if (subpcode > 0) {
2708 		arr[0] |= 0x40;
2709 		arr[1] = subpcode;
2710 		if (pcode == 0xd && subpcode == 1)
2711 			arr[3] = resp_env_rep_l_spg(arr + 4);
2712 		else {
2713 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2714 			return check_condition_result;
2715 		}
2716 	} else {
2717 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2718 		return check_condition_result;
2719 	}
2720 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2721 	return fill_from_dev_buffer(scp, arr,
2722 		    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2723 }
2724 
sdebug_dev_is_zoned(struct sdebug_dev_info * devip)2725 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2726 {
2727 	return devip->nr_zones != 0;
2728 }
2729 
zbc_zone(struct sdebug_dev_info * devip,unsigned long long lba)2730 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2731 					unsigned long long lba)
2732 {
2733 	u32 zno = lba >> devip->zsize_shift;
2734 	struct sdeb_zone_state *zsp;
2735 
2736 	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
2737 		return &devip->zstate[zno];
2738 
2739 	/*
2740 	 * If the zone capacity is less than the zone size, adjust for gap
2741 	 * zones.
2742 	 */
2743 	zno = 2 * zno - devip->nr_conv_zones;
2744 	WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
2745 	zsp = &devip->zstate[zno];
2746 	if (lba >= zsp->z_start + zsp->z_size)
2747 		zsp++;
2748 	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
2749 	return zsp;
2750 }
2751 
zbc_zone_is_conv(struct sdeb_zone_state * zsp)2752 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2753 {
2754 	return zsp->z_type == ZBC_ZTYPE_CNV;
2755 }
2756 
zbc_zone_is_gap(struct sdeb_zone_state * zsp)2757 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
2758 {
2759 	return zsp->z_type == ZBC_ZTYPE_GAP;
2760 }
2761 
zbc_zone_is_seq(struct sdeb_zone_state * zsp)2762 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
2763 {
2764 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
2765 }
2766 
zbc_close_zone(struct sdebug_dev_info * devip,struct sdeb_zone_state * zsp)2767 static void zbc_close_zone(struct sdebug_dev_info *devip,
2768 			   struct sdeb_zone_state *zsp)
2769 {
2770 	enum sdebug_z_cond zc;
2771 
2772 	if (!zbc_zone_is_seq(zsp))
2773 		return;
2774 
2775 	zc = zsp->z_cond;
2776 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2777 		return;
2778 
2779 	if (zc == ZC2_IMPLICIT_OPEN)
2780 		devip->nr_imp_open--;
2781 	else
2782 		devip->nr_exp_open--;
2783 
2784 	if (zsp->z_wp == zsp->z_start) {
2785 		zsp->z_cond = ZC1_EMPTY;
2786 	} else {
2787 		zsp->z_cond = ZC4_CLOSED;
2788 		devip->nr_closed++;
2789 	}
2790 }
2791 
zbc_close_imp_open_zone(struct sdebug_dev_info * devip)2792 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2793 {
2794 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2795 	unsigned int i;
2796 
2797 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2798 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2799 			zbc_close_zone(devip, zsp);
2800 			return;
2801 		}
2802 	}
2803 }
2804 
zbc_open_zone(struct sdebug_dev_info * devip,struct sdeb_zone_state * zsp,bool explicit)2805 static void zbc_open_zone(struct sdebug_dev_info *devip,
2806 			  struct sdeb_zone_state *zsp, bool explicit)
2807 {
2808 	enum sdebug_z_cond zc;
2809 
2810 	if (!zbc_zone_is_seq(zsp))
2811 		return;
2812 
2813 	zc = zsp->z_cond;
2814 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2815 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2816 		return;
2817 
2818 	/* Close an implicit open zone if necessary */
2819 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2820 		zbc_close_zone(devip, zsp);
2821 	else if (devip->max_open &&
2822 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2823 		zbc_close_imp_open_zone(devip);
2824 
2825 	if (zsp->z_cond == ZC4_CLOSED)
2826 		devip->nr_closed--;
2827 	if (explicit) {
2828 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2829 		devip->nr_exp_open++;
2830 	} else {
2831 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2832 		devip->nr_imp_open++;
2833 	}
2834 }
2835 
zbc_set_zone_full(struct sdebug_dev_info * devip,struct sdeb_zone_state * zsp)2836 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2837 				     struct sdeb_zone_state *zsp)
2838 {
2839 	switch (zsp->z_cond) {
2840 	case ZC2_IMPLICIT_OPEN:
2841 		devip->nr_imp_open--;
2842 		break;
2843 	case ZC3_EXPLICIT_OPEN:
2844 		devip->nr_exp_open--;
2845 		break;
2846 	default:
2847 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2848 			  zsp->z_start, zsp->z_cond);
2849 		break;
2850 	}
2851 	zsp->z_cond = ZC5_FULL;
2852 }
2853 
zbc_inc_wp(struct sdebug_dev_info * devip,unsigned long long lba,unsigned int num)2854 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2855 		       unsigned long long lba, unsigned int num)
2856 {
2857 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2858 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2859 
2860 	if (!zbc_zone_is_seq(zsp))
2861 		return;
2862 
2863 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2864 		zsp->z_wp += num;
2865 		if (zsp->z_wp >= zend)
2866 			zbc_set_zone_full(devip, zsp);
2867 		return;
2868 	}
2869 
2870 	while (num) {
2871 		if (lba != zsp->z_wp)
2872 			zsp->z_non_seq_resource = true;
2873 
2874 		end = lba + num;
2875 		if (end >= zend) {
2876 			n = zend - lba;
2877 			zsp->z_wp = zend;
2878 		} else if (end > zsp->z_wp) {
2879 			n = num;
2880 			zsp->z_wp = end;
2881 		} else {
2882 			n = num;
2883 		}
2884 		if (zsp->z_wp >= zend)
2885 			zbc_set_zone_full(devip, zsp);
2886 
2887 		num -= n;
2888 		lba += n;
2889 		if (num) {
2890 			zsp++;
2891 			zend = zsp->z_start + zsp->z_size;
2892 		}
2893 	}
2894 }
2895 
check_zbc_access_params(struct scsi_cmnd * scp,unsigned long long lba,unsigned int num,bool write)2896 static int check_zbc_access_params(struct scsi_cmnd *scp,
2897 			unsigned long long lba, unsigned int num, bool write)
2898 {
2899 	struct scsi_device *sdp = scp->device;
2900 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2901 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2902 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2903 
2904 	if (!write) {
2905 		if (devip->zmodel == BLK_ZONED_HA)
2906 			return 0;
2907 		/* For host-managed, reads cannot cross zone types boundaries */
2908 		if (zsp->z_type != zsp_end->z_type) {
2909 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2910 					LBA_OUT_OF_RANGE,
2911 					READ_INVDATA_ASCQ);
2912 			return check_condition_result;
2913 		}
2914 		return 0;
2915 	}
2916 
2917 	/* Writing into a gap zone is not allowed */
2918 	if (zbc_zone_is_gap(zsp)) {
2919 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
2920 				ATTEMPT_ACCESS_GAP);
2921 		return check_condition_result;
2922 	}
2923 
2924 	/* No restrictions for writes within conventional zones */
2925 	if (zbc_zone_is_conv(zsp)) {
2926 		if (!zbc_zone_is_conv(zsp_end)) {
2927 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2928 					LBA_OUT_OF_RANGE,
2929 					WRITE_BOUNDARY_ASCQ);
2930 			return check_condition_result;
2931 		}
2932 		return 0;
2933 	}
2934 
2935 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2936 		/* Writes cannot cross sequential zone boundaries */
2937 		if (zsp_end != zsp) {
2938 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2939 					LBA_OUT_OF_RANGE,
2940 					WRITE_BOUNDARY_ASCQ);
2941 			return check_condition_result;
2942 		}
2943 		/* Cannot write full zones */
2944 		if (zsp->z_cond == ZC5_FULL) {
2945 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2946 					INVALID_FIELD_IN_CDB, 0);
2947 			return check_condition_result;
2948 		}
2949 		/* Writes must be aligned to the zone WP */
2950 		if (lba != zsp->z_wp) {
2951 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2952 					LBA_OUT_OF_RANGE,
2953 					UNALIGNED_WRITE_ASCQ);
2954 			return check_condition_result;
2955 		}
2956 	}
2957 
2958 	/* Handle implicit open of closed and empty zones */
2959 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2960 		if (devip->max_open &&
2961 		    devip->nr_exp_open >= devip->max_open) {
2962 			mk_sense_buffer(scp, DATA_PROTECT,
2963 					INSUFF_RES_ASC,
2964 					INSUFF_ZONE_ASCQ);
2965 			return check_condition_result;
2966 		}
2967 		zbc_open_zone(devip, zsp, false);
2968 	}
2969 
2970 	return 0;
2971 }
2972 
check_device_access_params(struct scsi_cmnd * scp,unsigned long long lba,unsigned int num,bool write)2973 static inline int check_device_access_params
2974 			(struct scsi_cmnd *scp, unsigned long long lba,
2975 			 unsigned int num, bool write)
2976 {
2977 	struct scsi_device *sdp = scp->device;
2978 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2979 
2980 	if (lba + num > sdebug_capacity) {
2981 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2982 		return check_condition_result;
2983 	}
2984 	/* transfer length excessive (tie in to block limits VPD page) */
2985 	if (num > sdebug_store_sectors) {
2986 		/* needs work to find which cdb byte 'num' comes from */
2987 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2988 		return check_condition_result;
2989 	}
2990 	if (write && unlikely(sdebug_wp)) {
2991 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2992 		return check_condition_result;
2993 	}
2994 	if (sdebug_dev_is_zoned(devip))
2995 		return check_zbc_access_params(scp, lba, num, write);
2996 
2997 	return 0;
2998 }
2999 
3000 /*
3001  * Note: if BUG_ON() fires it usually indicates a problem with the parser
3002  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3003  * that access any of the "stores" in struct sdeb_store_info should call this
3004  * function with bug_if_fake_rw set to true.
3005  */
devip2sip(struct sdebug_dev_info * devip,bool bug_if_fake_rw)3006 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3007 						bool bug_if_fake_rw)
3008 {
3009 	if (sdebug_fake_rw) {
3010 		BUG_ON(bug_if_fake_rw);	/* See note above */
3011 		return NULL;
3012 	}
3013 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3014 }
3015 
3016 /* Returns number of bytes copied or -1 if error. */
do_device_access(struct sdeb_store_info * sip,struct scsi_cmnd * scp,u32 sg_skip,u64 lba,u32 num,bool do_write)3017 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3018 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
3019 {
3020 	int ret;
3021 	u64 block, rest = 0;
3022 	enum dma_data_direction dir;
3023 	struct scsi_data_buffer *sdb = &scp->sdb;
3024 	u8 *fsp;
3025 
3026 	if (do_write) {
3027 		dir = DMA_TO_DEVICE;
3028 		write_since_sync = true;
3029 	} else {
3030 		dir = DMA_FROM_DEVICE;
3031 	}
3032 
3033 	if (!sdb->length || !sip)
3034 		return 0;
3035 	if (scp->sc_data_direction != dir)
3036 		return -1;
3037 	fsp = sip->storep;
3038 
3039 	block = do_div(lba, sdebug_store_sectors);
3040 	if (block + num > sdebug_store_sectors)
3041 		rest = block + num - sdebug_store_sectors;
3042 
3043 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3044 		   fsp + (block * sdebug_sector_size),
3045 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
3046 	if (ret != (num - rest) * sdebug_sector_size)
3047 		return ret;
3048 
3049 	if (rest) {
3050 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3051 			    fsp, rest * sdebug_sector_size,
3052 			    sg_skip + ((num - rest) * sdebug_sector_size),
3053 			    do_write);
3054 	}
3055 
3056 	return ret;
3057 }
3058 
3059 /* Returns number of bytes copied or -1 if error. */
do_dout_fetch(struct scsi_cmnd * scp,u32 num,u8 * doutp)3060 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3061 {
3062 	struct scsi_data_buffer *sdb = &scp->sdb;
3063 
3064 	if (!sdb->length)
3065 		return 0;
3066 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3067 		return -1;
3068 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3069 			      num * sdebug_sector_size, 0, true);
3070 }
3071 
3072 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3073  * arr into sip->storep+lba and return true. If comparison fails then
3074  * return false. */
comp_write_worker(struct sdeb_store_info * sip,u64 lba,u32 num,const u8 * arr,bool compare_only)3075 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3076 			      const u8 *arr, bool compare_only)
3077 {
3078 	bool res;
3079 	u64 block, rest = 0;
3080 	u32 store_blks = sdebug_store_sectors;
3081 	u32 lb_size = sdebug_sector_size;
3082 	u8 *fsp = sip->storep;
3083 
3084 	block = do_div(lba, store_blks);
3085 	if (block + num > store_blks)
3086 		rest = block + num - store_blks;
3087 
3088 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3089 	if (!res)
3090 		return res;
3091 	if (rest)
3092 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
3093 			     rest * lb_size);
3094 	if (!res)
3095 		return res;
3096 	if (compare_only)
3097 		return true;
3098 	arr += num * lb_size;
3099 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3100 	if (rest)
3101 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3102 	return res;
3103 }
3104 
dif_compute_csum(const void * buf,int len)3105 static __be16 dif_compute_csum(const void *buf, int len)
3106 {
3107 	__be16 csum;
3108 
3109 	if (sdebug_guard)
3110 		csum = (__force __be16)ip_compute_csum(buf, len);
3111 	else
3112 		csum = cpu_to_be16(crc_t10dif(buf, len));
3113 
3114 	return csum;
3115 }
3116 
dif_verify(struct t10_pi_tuple * sdt,const void * data,sector_t sector,u32 ei_lba)3117 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3118 		      sector_t sector, u32 ei_lba)
3119 {
3120 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3121 
3122 	if (sdt->guard_tag != csum) {
3123 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3124 			(unsigned long)sector,
3125 			be16_to_cpu(sdt->guard_tag),
3126 			be16_to_cpu(csum));
3127 		return 0x01;
3128 	}
3129 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3130 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3131 		pr_err("REF check failed on sector %lu\n",
3132 			(unsigned long)sector);
3133 		return 0x03;
3134 	}
3135 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3136 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3137 		pr_err("REF check failed on sector %lu\n",
3138 			(unsigned long)sector);
3139 		return 0x03;
3140 	}
3141 	return 0;
3142 }
3143 
dif_copy_prot(struct scsi_cmnd * scp,sector_t sector,unsigned int sectors,bool read)3144 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3145 			  unsigned int sectors, bool read)
3146 {
3147 	size_t resid;
3148 	void *paddr;
3149 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3150 						scp->device->hostdata, true);
3151 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3152 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3153 	struct sg_mapping_iter miter;
3154 
3155 	/* Bytes of protection data to copy into sgl */
3156 	resid = sectors * sizeof(*dif_storep);
3157 
3158 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3159 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3160 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3161 
3162 	while (sg_miter_next(&miter) && resid > 0) {
3163 		size_t len = min_t(size_t, miter.length, resid);
3164 		void *start = dif_store(sip, sector);
3165 		size_t rest = 0;
3166 
3167 		if (dif_store_end < start + len)
3168 			rest = start + len - dif_store_end;
3169 
3170 		paddr = miter.addr;
3171 
3172 		if (read)
3173 			memcpy(paddr, start, len - rest);
3174 		else
3175 			memcpy(start, paddr, len - rest);
3176 
3177 		if (rest) {
3178 			if (read)
3179 				memcpy(paddr + len - rest, dif_storep, rest);
3180 			else
3181 				memcpy(dif_storep, paddr + len - rest, rest);
3182 		}
3183 
3184 		sector += len / sizeof(*dif_storep);
3185 		resid -= len;
3186 	}
3187 	sg_miter_stop(&miter);
3188 }
3189 
prot_verify_read(struct scsi_cmnd * scp,sector_t start_sec,unsigned int sectors,u32 ei_lba)3190 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3191 			    unsigned int sectors, u32 ei_lba)
3192 {
3193 	int ret = 0;
3194 	unsigned int i;
3195 	sector_t sector;
3196 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3197 						scp->device->hostdata, true);
3198 	struct t10_pi_tuple *sdt;
3199 
3200 	for (i = 0; i < sectors; i++, ei_lba++) {
3201 		sector = start_sec + i;
3202 		sdt = dif_store(sip, sector);
3203 
3204 		if (sdt->app_tag == cpu_to_be16(0xffff))
3205 			continue;
3206 
3207 		/*
3208 		 * Because scsi_debug acts as both initiator and
3209 		 * target we proceed to verify the PI even if
3210 		 * RDPROTECT=3. This is done so the "initiator" knows
3211 		 * which type of error to return. Otherwise we would
3212 		 * have to iterate over the PI twice.
3213 		 */
3214 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3215 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3216 					 sector, ei_lba);
3217 			if (ret) {
3218 				dif_errors++;
3219 				break;
3220 			}
3221 		}
3222 	}
3223 
3224 	dif_copy_prot(scp, start_sec, sectors, true);
3225 	dix_reads++;
3226 
3227 	return ret;
3228 }
3229 
3230 static inline void
sdeb_read_lock(struct sdeb_store_info * sip)3231 sdeb_read_lock(struct sdeb_store_info *sip)
3232 {
3233 	if (sdebug_no_rwlock) {
3234 		if (sip)
3235 			__acquire(&sip->macc_lck);
3236 		else
3237 			__acquire(&sdeb_fake_rw_lck);
3238 	} else {
3239 		if (sip)
3240 			read_lock(&sip->macc_lck);
3241 		else
3242 			read_lock(&sdeb_fake_rw_lck);
3243 	}
3244 }
3245 
3246 static inline void
sdeb_read_unlock(struct sdeb_store_info * sip)3247 sdeb_read_unlock(struct sdeb_store_info *sip)
3248 {
3249 	if (sdebug_no_rwlock) {
3250 		if (sip)
3251 			__release(&sip->macc_lck);
3252 		else
3253 			__release(&sdeb_fake_rw_lck);
3254 	} else {
3255 		if (sip)
3256 			read_unlock(&sip->macc_lck);
3257 		else
3258 			read_unlock(&sdeb_fake_rw_lck);
3259 	}
3260 }
3261 
3262 static inline void
sdeb_write_lock(struct sdeb_store_info * sip)3263 sdeb_write_lock(struct sdeb_store_info *sip)
3264 {
3265 	if (sdebug_no_rwlock) {
3266 		if (sip)
3267 			__acquire(&sip->macc_lck);
3268 		else
3269 			__acquire(&sdeb_fake_rw_lck);
3270 	} else {
3271 		if (sip)
3272 			write_lock(&sip->macc_lck);
3273 		else
3274 			write_lock(&sdeb_fake_rw_lck);
3275 	}
3276 }
3277 
3278 static inline void
sdeb_write_unlock(struct sdeb_store_info * sip)3279 sdeb_write_unlock(struct sdeb_store_info *sip)
3280 {
3281 	if (sdebug_no_rwlock) {
3282 		if (sip)
3283 			__release(&sip->macc_lck);
3284 		else
3285 			__release(&sdeb_fake_rw_lck);
3286 	} else {
3287 		if (sip)
3288 			write_unlock(&sip->macc_lck);
3289 		else
3290 			write_unlock(&sdeb_fake_rw_lck);
3291 	}
3292 }
3293 
resp_read_dt0(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)3294 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3295 {
3296 	bool check_prot;
3297 	u32 num;
3298 	u32 ei_lba;
3299 	int ret;
3300 	u64 lba;
3301 	struct sdeb_store_info *sip = devip2sip(devip, true);
3302 	u8 *cmd = scp->cmnd;
3303 
3304 	switch (cmd[0]) {
3305 	case READ_16:
3306 		ei_lba = 0;
3307 		lba = get_unaligned_be64(cmd + 2);
3308 		num = get_unaligned_be32(cmd + 10);
3309 		check_prot = true;
3310 		break;
3311 	case READ_10:
3312 		ei_lba = 0;
3313 		lba = get_unaligned_be32(cmd + 2);
3314 		num = get_unaligned_be16(cmd + 7);
3315 		check_prot = true;
3316 		break;
3317 	case READ_6:
3318 		ei_lba = 0;
3319 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3320 		      (u32)(cmd[1] & 0x1f) << 16;
3321 		num = (0 == cmd[4]) ? 256 : cmd[4];
3322 		check_prot = true;
3323 		break;
3324 	case READ_12:
3325 		ei_lba = 0;
3326 		lba = get_unaligned_be32(cmd + 2);
3327 		num = get_unaligned_be32(cmd + 6);
3328 		check_prot = true;
3329 		break;
3330 	case XDWRITEREAD_10:
3331 		ei_lba = 0;
3332 		lba = get_unaligned_be32(cmd + 2);
3333 		num = get_unaligned_be16(cmd + 7);
3334 		check_prot = false;
3335 		break;
3336 	default:	/* assume READ(32) */
3337 		lba = get_unaligned_be64(cmd + 12);
3338 		ei_lba = get_unaligned_be32(cmd + 20);
3339 		num = get_unaligned_be32(cmd + 28);
3340 		check_prot = false;
3341 		break;
3342 	}
3343 	if (unlikely(have_dif_prot && check_prot)) {
3344 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3345 		    (cmd[1] & 0xe0)) {
3346 			mk_sense_invalid_opcode(scp);
3347 			return check_condition_result;
3348 		}
3349 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3350 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3351 		    (cmd[1] & 0xe0) == 0)
3352 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3353 				    "to DIF device\n");
3354 	}
3355 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3356 		     atomic_read(&sdeb_inject_pending))) {
3357 		num /= 2;
3358 		atomic_set(&sdeb_inject_pending, 0);
3359 	}
3360 
3361 	ret = check_device_access_params(scp, lba, num, false);
3362 	if (ret)
3363 		return ret;
3364 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3365 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3366 		     ((lba + num) > sdebug_medium_error_start))) {
3367 		/* claim unrecoverable read error */
3368 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3369 		/* set info field and valid bit for fixed descriptor */
3370 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3371 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3372 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3373 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3374 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3375 		}
3376 		scsi_set_resid(scp, scsi_bufflen(scp));
3377 		return check_condition_result;
3378 	}
3379 
3380 	sdeb_read_lock(sip);
3381 
3382 	/* DIX + T10 DIF */
3383 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3384 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3385 		case 1: /* Guard tag error */
3386 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3387 				sdeb_read_unlock(sip);
3388 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3389 				return check_condition_result;
3390 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3391 				sdeb_read_unlock(sip);
3392 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3393 				return illegal_condition_result;
3394 			}
3395 			break;
3396 		case 3: /* Reference tag error */
3397 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3398 				sdeb_read_unlock(sip);
3399 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3400 				return check_condition_result;
3401 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3402 				sdeb_read_unlock(sip);
3403 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3404 				return illegal_condition_result;
3405 			}
3406 			break;
3407 		}
3408 	}
3409 
3410 	ret = do_device_access(sip, scp, 0, lba, num, false);
3411 	sdeb_read_unlock(sip);
3412 	if (unlikely(ret == -1))
3413 		return DID_ERROR << 16;
3414 
3415 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3416 
3417 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3418 		     atomic_read(&sdeb_inject_pending))) {
3419 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3420 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3421 			atomic_set(&sdeb_inject_pending, 0);
3422 			return check_condition_result;
3423 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3424 			/* Logical block guard check failed */
3425 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3426 			atomic_set(&sdeb_inject_pending, 0);
3427 			return illegal_condition_result;
3428 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3429 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3430 			atomic_set(&sdeb_inject_pending, 0);
3431 			return illegal_condition_result;
3432 		}
3433 	}
3434 	return 0;
3435 }
3436 
prot_verify_write(struct scsi_cmnd * SCpnt,sector_t start_sec,unsigned int sectors,u32 ei_lba)3437 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3438 			     unsigned int sectors, u32 ei_lba)
3439 {
3440 	int ret;
3441 	struct t10_pi_tuple *sdt;
3442 	void *daddr;
3443 	sector_t sector = start_sec;
3444 	int ppage_offset;
3445 	int dpage_offset;
3446 	struct sg_mapping_iter diter;
3447 	struct sg_mapping_iter piter;
3448 
3449 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3450 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3451 
3452 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3453 			scsi_prot_sg_count(SCpnt),
3454 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3455 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3456 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3457 
3458 	/* For each protection page */
3459 	while (sg_miter_next(&piter)) {
3460 		dpage_offset = 0;
3461 		if (WARN_ON(!sg_miter_next(&diter))) {
3462 			ret = 0x01;
3463 			goto out;
3464 		}
3465 
3466 		for (ppage_offset = 0; ppage_offset < piter.length;
3467 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3468 			/* If we're at the end of the current
3469 			 * data page advance to the next one
3470 			 */
3471 			if (dpage_offset >= diter.length) {
3472 				if (WARN_ON(!sg_miter_next(&diter))) {
3473 					ret = 0x01;
3474 					goto out;
3475 				}
3476 				dpage_offset = 0;
3477 			}
3478 
3479 			sdt = piter.addr + ppage_offset;
3480 			daddr = diter.addr + dpage_offset;
3481 
3482 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3483 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3484 				if (ret)
3485 					goto out;
3486 			}
3487 
3488 			sector++;
3489 			ei_lba++;
3490 			dpage_offset += sdebug_sector_size;
3491 		}
3492 		diter.consumed = dpage_offset;
3493 		sg_miter_stop(&diter);
3494 	}
3495 	sg_miter_stop(&piter);
3496 
3497 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3498 	dix_writes++;
3499 
3500 	return 0;
3501 
3502 out:
3503 	dif_errors++;
3504 	sg_miter_stop(&diter);
3505 	sg_miter_stop(&piter);
3506 	return ret;
3507 }
3508 
lba_to_map_index(sector_t lba)3509 static unsigned long lba_to_map_index(sector_t lba)
3510 {
3511 	if (sdebug_unmap_alignment)
3512 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3513 	sector_div(lba, sdebug_unmap_granularity);
3514 	return lba;
3515 }
3516 
map_index_to_lba(unsigned long index)3517 static sector_t map_index_to_lba(unsigned long index)
3518 {
3519 	sector_t lba = index * sdebug_unmap_granularity;
3520 
3521 	if (sdebug_unmap_alignment)
3522 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3523 	return lba;
3524 }
3525 
map_state(struct sdeb_store_info * sip,sector_t lba,unsigned int * num)3526 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3527 			      unsigned int *num)
3528 {
3529 	sector_t end;
3530 	unsigned int mapped;
3531 	unsigned long index;
3532 	unsigned long next;
3533 
3534 	index = lba_to_map_index(lba);
3535 	mapped = test_bit(index, sip->map_storep);
3536 
3537 	if (mapped)
3538 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3539 	else
3540 		next = find_next_bit(sip->map_storep, map_size, index);
3541 
3542 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3543 	*num = end - lba;
3544 	return mapped;
3545 }
3546 
map_region(struct sdeb_store_info * sip,sector_t lba,unsigned int len)3547 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3548 		       unsigned int len)
3549 {
3550 	sector_t end = lba + len;
3551 
3552 	while (lba < end) {
3553 		unsigned long index = lba_to_map_index(lba);
3554 
3555 		if (index < map_size)
3556 			set_bit(index, sip->map_storep);
3557 
3558 		lba = map_index_to_lba(index + 1);
3559 	}
3560 }
3561 
unmap_region(struct sdeb_store_info * sip,sector_t lba,unsigned int len)3562 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3563 			 unsigned int len)
3564 {
3565 	sector_t end = lba + len;
3566 	u8 *fsp = sip->storep;
3567 
3568 	while (lba < end) {
3569 		unsigned long index = lba_to_map_index(lba);
3570 
3571 		if (lba == map_index_to_lba(index) &&
3572 		    lba + sdebug_unmap_granularity <= end &&
3573 		    index < map_size) {
3574 			clear_bit(index, sip->map_storep);
3575 			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
3576 				memset(fsp + lba * sdebug_sector_size,
3577 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3578 				       sdebug_sector_size *
3579 				       sdebug_unmap_granularity);
3580 			}
3581 			if (sip->dif_storep) {
3582 				memset(sip->dif_storep + lba, 0xff,
3583 				       sizeof(*sip->dif_storep) *
3584 				       sdebug_unmap_granularity);
3585 			}
3586 		}
3587 		lba = map_index_to_lba(index + 1);
3588 	}
3589 }
3590 
resp_write_dt0(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)3591 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3592 {
3593 	bool check_prot;
3594 	u32 num;
3595 	u32 ei_lba;
3596 	int ret;
3597 	u64 lba;
3598 	struct sdeb_store_info *sip = devip2sip(devip, true);
3599 	u8 *cmd = scp->cmnd;
3600 
3601 	switch (cmd[0]) {
3602 	case WRITE_16:
3603 		ei_lba = 0;
3604 		lba = get_unaligned_be64(cmd + 2);
3605 		num = get_unaligned_be32(cmd + 10);
3606 		check_prot = true;
3607 		break;
3608 	case WRITE_10:
3609 		ei_lba = 0;
3610 		lba = get_unaligned_be32(cmd + 2);
3611 		num = get_unaligned_be16(cmd + 7);
3612 		check_prot = true;
3613 		break;
3614 	case WRITE_6:
3615 		ei_lba = 0;
3616 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3617 		      (u32)(cmd[1] & 0x1f) << 16;
3618 		num = (0 == cmd[4]) ? 256 : cmd[4];
3619 		check_prot = true;
3620 		break;
3621 	case WRITE_12:
3622 		ei_lba = 0;
3623 		lba = get_unaligned_be32(cmd + 2);
3624 		num = get_unaligned_be32(cmd + 6);
3625 		check_prot = true;
3626 		break;
3627 	case 0x53:	/* XDWRITEREAD(10) */
3628 		ei_lba = 0;
3629 		lba = get_unaligned_be32(cmd + 2);
3630 		num = get_unaligned_be16(cmd + 7);
3631 		check_prot = false;
3632 		break;
3633 	default:	/* assume WRITE(32) */
3634 		lba = get_unaligned_be64(cmd + 12);
3635 		ei_lba = get_unaligned_be32(cmd + 20);
3636 		num = get_unaligned_be32(cmd + 28);
3637 		check_prot = false;
3638 		break;
3639 	}
3640 	if (unlikely(have_dif_prot && check_prot)) {
3641 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3642 		    (cmd[1] & 0xe0)) {
3643 			mk_sense_invalid_opcode(scp);
3644 			return check_condition_result;
3645 		}
3646 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3647 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3648 		    (cmd[1] & 0xe0) == 0)
3649 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3650 				    "to DIF device\n");
3651 	}
3652 
3653 	sdeb_write_lock(sip);
3654 	ret = check_device_access_params(scp, lba, num, true);
3655 	if (ret) {
3656 		sdeb_write_unlock(sip);
3657 		return ret;
3658 	}
3659 
3660 	/* DIX + T10 DIF */
3661 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3662 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
3663 		case 1: /* Guard tag error */
3664 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3665 				sdeb_write_unlock(sip);
3666 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3667 				return illegal_condition_result;
3668 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3669 				sdeb_write_unlock(sip);
3670 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3671 				return check_condition_result;
3672 			}
3673 			break;
3674 		case 3: /* Reference tag error */
3675 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3676 				sdeb_write_unlock(sip);
3677 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3678 				return illegal_condition_result;
3679 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3680 				sdeb_write_unlock(sip);
3681 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3682 				return check_condition_result;
3683 			}
3684 			break;
3685 		}
3686 	}
3687 
3688 	ret = do_device_access(sip, scp, 0, lba, num, true);
3689 	if (unlikely(scsi_debug_lbp()))
3690 		map_region(sip, lba, num);
3691 	/* If ZBC zone then bump its write pointer */
3692 	if (sdebug_dev_is_zoned(devip))
3693 		zbc_inc_wp(devip, lba, num);
3694 	sdeb_write_unlock(sip);
3695 	if (unlikely(-1 == ret))
3696 		return DID_ERROR << 16;
3697 	else if (unlikely(sdebug_verbose &&
3698 			  (ret < (num * sdebug_sector_size))))
3699 		sdev_printk(KERN_INFO, scp->device,
3700 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3701 			    my_name, num * sdebug_sector_size, ret);
3702 
3703 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3704 		     atomic_read(&sdeb_inject_pending))) {
3705 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3706 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3707 			atomic_set(&sdeb_inject_pending, 0);
3708 			return check_condition_result;
3709 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3710 			/* Logical block guard check failed */
3711 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3712 			atomic_set(&sdeb_inject_pending, 0);
3713 			return illegal_condition_result;
3714 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3715 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3716 			atomic_set(&sdeb_inject_pending, 0);
3717 			return illegal_condition_result;
3718 		}
3719 	}
3720 	return 0;
3721 }
3722 
3723 /*
3724  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3725  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3726  */
resp_write_scat(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)3727 static int resp_write_scat(struct scsi_cmnd *scp,
3728 			   struct sdebug_dev_info *devip)
3729 {
3730 	u8 *cmd = scp->cmnd;
3731 	u8 *lrdp = NULL;
3732 	u8 *up;
3733 	struct sdeb_store_info *sip = devip2sip(devip, true);
3734 	u8 wrprotect;
3735 	u16 lbdof, num_lrd, k;
3736 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3737 	u32 lb_size = sdebug_sector_size;
3738 	u32 ei_lba;
3739 	u64 lba;
3740 	int ret, res;
3741 	bool is_16;
3742 	static const u32 lrd_size = 32; /* + parameter list header size */
3743 
3744 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3745 		is_16 = false;
3746 		wrprotect = (cmd[10] >> 5) & 0x7;
3747 		lbdof = get_unaligned_be16(cmd + 12);
3748 		num_lrd = get_unaligned_be16(cmd + 16);
3749 		bt_len = get_unaligned_be32(cmd + 28);
3750 	} else {        /* that leaves WRITE SCATTERED(16) */
3751 		is_16 = true;
3752 		wrprotect = (cmd[2] >> 5) & 0x7;
3753 		lbdof = get_unaligned_be16(cmd + 4);
3754 		num_lrd = get_unaligned_be16(cmd + 8);
3755 		bt_len = get_unaligned_be32(cmd + 10);
3756 		if (unlikely(have_dif_prot)) {
3757 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3758 			    wrprotect) {
3759 				mk_sense_invalid_opcode(scp);
3760 				return illegal_condition_result;
3761 			}
3762 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3763 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3764 			     wrprotect == 0)
3765 				sdev_printk(KERN_ERR, scp->device,
3766 					    "Unprotected WR to DIF device\n");
3767 		}
3768 	}
3769 	if ((num_lrd == 0) || (bt_len == 0))
3770 		return 0;       /* T10 says these do-nothings are not errors */
3771 	if (lbdof == 0) {
3772 		if (sdebug_verbose)
3773 			sdev_printk(KERN_INFO, scp->device,
3774 				"%s: %s: LB Data Offset field bad\n",
3775 				my_name, __func__);
3776 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3777 		return illegal_condition_result;
3778 	}
3779 	lbdof_blen = lbdof * lb_size;
3780 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3781 		if (sdebug_verbose)
3782 			sdev_printk(KERN_INFO, scp->device,
3783 				"%s: %s: LBA range descriptors don't fit\n",
3784 				my_name, __func__);
3785 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3786 		return illegal_condition_result;
3787 	}
3788 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
3789 	if (lrdp == NULL)
3790 		return SCSI_MLQUEUE_HOST_BUSY;
3791 	if (sdebug_verbose)
3792 		sdev_printk(KERN_INFO, scp->device,
3793 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3794 			my_name, __func__, lbdof_blen);
3795 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3796 	if (res == -1) {
3797 		ret = DID_ERROR << 16;
3798 		goto err_out;
3799 	}
3800 
3801 	sdeb_write_lock(sip);
3802 	sg_off = lbdof_blen;
3803 	/* Spec says Buffer xfer Length field in number of LBs in dout */
3804 	cum_lb = 0;
3805 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3806 		lba = get_unaligned_be64(up + 0);
3807 		num = get_unaligned_be32(up + 8);
3808 		if (sdebug_verbose)
3809 			sdev_printk(KERN_INFO, scp->device,
3810 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3811 				my_name, __func__, k, lba, num, sg_off);
3812 		if (num == 0)
3813 			continue;
3814 		ret = check_device_access_params(scp, lba, num, true);
3815 		if (ret)
3816 			goto err_out_unlock;
3817 		num_by = num * lb_size;
3818 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3819 
3820 		if ((cum_lb + num) > bt_len) {
3821 			if (sdebug_verbose)
3822 				sdev_printk(KERN_INFO, scp->device,
3823 				    "%s: %s: sum of blocks > data provided\n",
3824 				    my_name, __func__);
3825 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3826 					0);
3827 			ret = illegal_condition_result;
3828 			goto err_out_unlock;
3829 		}
3830 
3831 		/* DIX + T10 DIF */
3832 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3833 			int prot_ret = prot_verify_write(scp, lba, num,
3834 							 ei_lba);
3835 
3836 			if (prot_ret) {
3837 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3838 						prot_ret);
3839 				ret = illegal_condition_result;
3840 				goto err_out_unlock;
3841 			}
3842 		}
3843 
3844 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3845 		/* If ZBC zone then bump its write pointer */
3846 		if (sdebug_dev_is_zoned(devip))
3847 			zbc_inc_wp(devip, lba, num);
3848 		if (unlikely(scsi_debug_lbp()))
3849 			map_region(sip, lba, num);
3850 		if (unlikely(-1 == ret)) {
3851 			ret = DID_ERROR << 16;
3852 			goto err_out_unlock;
3853 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3854 			sdev_printk(KERN_INFO, scp->device,
3855 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3856 			    my_name, num_by, ret);
3857 
3858 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3859 			     atomic_read(&sdeb_inject_pending))) {
3860 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3861 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3862 				atomic_set(&sdeb_inject_pending, 0);
3863 				ret = check_condition_result;
3864 				goto err_out_unlock;
3865 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3866 				/* Logical block guard check failed */
3867 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3868 				atomic_set(&sdeb_inject_pending, 0);
3869 				ret = illegal_condition_result;
3870 				goto err_out_unlock;
3871 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3872 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3873 				atomic_set(&sdeb_inject_pending, 0);
3874 				ret = illegal_condition_result;
3875 				goto err_out_unlock;
3876 			}
3877 		}
3878 		sg_off += num_by;
3879 		cum_lb += num;
3880 	}
3881 	ret = 0;
3882 err_out_unlock:
3883 	sdeb_write_unlock(sip);
3884 err_out:
3885 	kfree(lrdp);
3886 	return ret;
3887 }
3888 
resp_write_same(struct scsi_cmnd * scp,u64 lba,u32 num,u32 ei_lba,bool unmap,bool ndob)3889 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3890 			   u32 ei_lba, bool unmap, bool ndob)
3891 {
3892 	struct scsi_device *sdp = scp->device;
3893 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3894 	unsigned long long i;
3895 	u64 block, lbaa;
3896 	u32 lb_size = sdebug_sector_size;
3897 	int ret;
3898 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3899 						scp->device->hostdata, true);
3900 	u8 *fs1p;
3901 	u8 *fsp;
3902 
3903 	sdeb_write_lock(sip);
3904 
3905 	ret = check_device_access_params(scp, lba, num, true);
3906 	if (ret) {
3907 		sdeb_write_unlock(sip);
3908 		return ret;
3909 	}
3910 
3911 	if (unmap && scsi_debug_lbp()) {
3912 		unmap_region(sip, lba, num);
3913 		goto out;
3914 	}
3915 	lbaa = lba;
3916 	block = do_div(lbaa, sdebug_store_sectors);
3917 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3918 	fsp = sip->storep;
3919 	fs1p = fsp + (block * lb_size);
3920 	if (ndob) {
3921 		memset(fs1p, 0, lb_size);
3922 		ret = 0;
3923 	} else
3924 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3925 
3926 	if (-1 == ret) {
3927 		sdeb_write_unlock(sip);
3928 		return DID_ERROR << 16;
3929 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3930 		sdev_printk(KERN_INFO, scp->device,
3931 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3932 			    my_name, "write same", lb_size, ret);
3933 
3934 	/* Copy first sector to remaining blocks */
3935 	for (i = 1 ; i < num ; i++) {
3936 		lbaa = lba + i;
3937 		block = do_div(lbaa, sdebug_store_sectors);
3938 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3939 	}
3940 	if (scsi_debug_lbp())
3941 		map_region(sip, lba, num);
3942 	/* If ZBC zone then bump its write pointer */
3943 	if (sdebug_dev_is_zoned(devip))
3944 		zbc_inc_wp(devip, lba, num);
3945 out:
3946 	sdeb_write_unlock(sip);
3947 
3948 	return 0;
3949 }
3950 
resp_write_same_10(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)3951 static int resp_write_same_10(struct scsi_cmnd *scp,
3952 			      struct sdebug_dev_info *devip)
3953 {
3954 	u8 *cmd = scp->cmnd;
3955 	u32 lba;
3956 	u16 num;
3957 	u32 ei_lba = 0;
3958 	bool unmap = false;
3959 
3960 	if (cmd[1] & 0x8) {
3961 		if (sdebug_lbpws10 == 0) {
3962 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3963 			return check_condition_result;
3964 		} else
3965 			unmap = true;
3966 	}
3967 	lba = get_unaligned_be32(cmd + 2);
3968 	num = get_unaligned_be16(cmd + 7);
3969 	if (num > sdebug_write_same_length) {
3970 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3971 		return check_condition_result;
3972 	}
3973 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3974 }
3975 
resp_write_same_16(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)3976 static int resp_write_same_16(struct scsi_cmnd *scp,
3977 			      struct sdebug_dev_info *devip)
3978 {
3979 	u8 *cmd = scp->cmnd;
3980 	u64 lba;
3981 	u32 num;
3982 	u32 ei_lba = 0;
3983 	bool unmap = false;
3984 	bool ndob = false;
3985 
3986 	if (cmd[1] & 0x8) {	/* UNMAP */
3987 		if (sdebug_lbpws == 0) {
3988 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3989 			return check_condition_result;
3990 		} else
3991 			unmap = true;
3992 	}
3993 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3994 		ndob = true;
3995 	lba = get_unaligned_be64(cmd + 2);
3996 	num = get_unaligned_be32(cmd + 10);
3997 	if (num > sdebug_write_same_length) {
3998 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3999 		return check_condition_result;
4000 	}
4001 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4002 }
4003 
4004 /* Note the mode field is in the same position as the (lower) service action
4005  * field. For the Report supported operation codes command, SPC-4 suggests
4006  * each mode of this command should be reported separately; for future. */
resp_write_buffer(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)4007 static int resp_write_buffer(struct scsi_cmnd *scp,
4008 			     struct sdebug_dev_info *devip)
4009 {
4010 	u8 *cmd = scp->cmnd;
4011 	struct scsi_device *sdp = scp->device;
4012 	struct sdebug_dev_info *dp;
4013 	u8 mode;
4014 
4015 	mode = cmd[1] & 0x1f;
4016 	switch (mode) {
4017 	case 0x4:	/* download microcode (MC) and activate (ACT) */
4018 		/* set UAs on this device only */
4019 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4020 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4021 		break;
4022 	case 0x5:	/* download MC, save and ACT */
4023 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4024 		break;
4025 	case 0x6:	/* download MC with offsets and ACT */
4026 		/* set UAs on most devices (LUs) in this target */
4027 		list_for_each_entry(dp,
4028 				    &devip->sdbg_host->dev_info_list,
4029 				    dev_list)
4030 			if (dp->target == sdp->id) {
4031 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4032 				if (devip != dp)
4033 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4034 						dp->uas_bm);
4035 			}
4036 		break;
4037 	case 0x7:	/* download MC with offsets, save, and ACT */
4038 		/* set UA on all devices (LUs) in this target */
4039 		list_for_each_entry(dp,
4040 				    &devip->sdbg_host->dev_info_list,
4041 				    dev_list)
4042 			if (dp->target == sdp->id)
4043 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4044 					dp->uas_bm);
4045 		break;
4046 	default:
4047 		/* do nothing for this command for other mode values */
4048 		break;
4049 	}
4050 	return 0;
4051 }
4052 
resp_comp_write(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)4053 static int resp_comp_write(struct scsi_cmnd *scp,
4054 			   struct sdebug_dev_info *devip)
4055 {
4056 	u8 *cmd = scp->cmnd;
4057 	u8 *arr;
4058 	struct sdeb_store_info *sip = devip2sip(devip, true);
4059 	u64 lba;
4060 	u32 dnum;
4061 	u32 lb_size = sdebug_sector_size;
4062 	u8 num;
4063 	int ret;
4064 	int retval = 0;
4065 
4066 	lba = get_unaligned_be64(cmd + 2);
4067 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4068 	if (0 == num)
4069 		return 0;	/* degenerate case, not an error */
4070 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4071 	    (cmd[1] & 0xe0)) {
4072 		mk_sense_invalid_opcode(scp);
4073 		return check_condition_result;
4074 	}
4075 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4076 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4077 	    (cmd[1] & 0xe0) == 0)
4078 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4079 			    "to DIF device\n");
4080 	ret = check_device_access_params(scp, lba, num, false);
4081 	if (ret)
4082 		return ret;
4083 	dnum = 2 * num;
4084 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4085 	if (NULL == arr) {
4086 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4087 				INSUFF_RES_ASCQ);
4088 		return check_condition_result;
4089 	}
4090 
4091 	sdeb_write_lock(sip);
4092 
4093 	ret = do_dout_fetch(scp, dnum, arr);
4094 	if (ret == -1) {
4095 		retval = DID_ERROR << 16;
4096 		goto cleanup;
4097 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4098 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4099 			    "indicated=%u, IO sent=%d bytes\n", my_name,
4100 			    dnum * lb_size, ret);
4101 	if (!comp_write_worker(sip, lba, num, arr, false)) {
4102 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4103 		retval = check_condition_result;
4104 		goto cleanup;
4105 	}
4106 	if (scsi_debug_lbp())
4107 		map_region(sip, lba, num);
4108 cleanup:
4109 	sdeb_write_unlock(sip);
4110 	kfree(arr);
4111 	return retval;
4112 }
4113 
4114 struct unmap_block_desc {
4115 	__be64	lba;
4116 	__be32	blocks;
4117 	__be32	__reserved;
4118 };
4119 
resp_unmap(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)4120 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4121 {
4122 	unsigned char *buf;
4123 	struct unmap_block_desc *desc;
4124 	struct sdeb_store_info *sip = devip2sip(devip, true);
4125 	unsigned int i, payload_len, descriptors;
4126 	int ret;
4127 
4128 	if (!scsi_debug_lbp())
4129 		return 0;	/* fib and say its done */
4130 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4131 	BUG_ON(scsi_bufflen(scp) != payload_len);
4132 
4133 	descriptors = (payload_len - 8) / 16;
4134 	if (descriptors > sdebug_unmap_max_desc) {
4135 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4136 		return check_condition_result;
4137 	}
4138 
4139 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4140 	if (!buf) {
4141 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4142 				INSUFF_RES_ASCQ);
4143 		return check_condition_result;
4144 	}
4145 
4146 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4147 
4148 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4149 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4150 
4151 	desc = (void *)&buf[8];
4152 
4153 	sdeb_write_lock(sip);
4154 
4155 	for (i = 0 ; i < descriptors ; i++) {
4156 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4157 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4158 
4159 		ret = check_device_access_params(scp, lba, num, true);
4160 		if (ret)
4161 			goto out;
4162 
4163 		unmap_region(sip, lba, num);
4164 	}
4165 
4166 	ret = 0;
4167 
4168 out:
4169 	sdeb_write_unlock(sip);
4170 	kfree(buf);
4171 
4172 	return ret;
4173 }
4174 
4175 #define SDEBUG_GET_LBA_STATUS_LEN 32
4176 
resp_get_lba_status(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)4177 static int resp_get_lba_status(struct scsi_cmnd *scp,
4178 			       struct sdebug_dev_info *devip)
4179 {
4180 	u8 *cmd = scp->cmnd;
4181 	u64 lba;
4182 	u32 alloc_len, mapped, num;
4183 	int ret;
4184 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4185 
4186 	lba = get_unaligned_be64(cmd + 2);
4187 	alloc_len = get_unaligned_be32(cmd + 10);
4188 
4189 	if (alloc_len < 24)
4190 		return 0;
4191 
4192 	ret = check_device_access_params(scp, lba, 1, false);
4193 	if (ret)
4194 		return ret;
4195 
4196 	if (scsi_debug_lbp()) {
4197 		struct sdeb_store_info *sip = devip2sip(devip, true);
4198 
4199 		mapped = map_state(sip, lba, &num);
4200 	} else {
4201 		mapped = 1;
4202 		/* following just in case virtual_gb changed */
4203 		sdebug_capacity = get_sdebug_capacity();
4204 		if (sdebug_capacity - lba <= 0xffffffff)
4205 			num = sdebug_capacity - lba;
4206 		else
4207 			num = 0xffffffff;
4208 	}
4209 
4210 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4211 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4212 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4213 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4214 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4215 
4216 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4217 }
4218 
resp_sync_cache(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)4219 static int resp_sync_cache(struct scsi_cmnd *scp,
4220 			   struct sdebug_dev_info *devip)
4221 {
4222 	int res = 0;
4223 	u64 lba;
4224 	u32 num_blocks;
4225 	u8 *cmd = scp->cmnd;
4226 
4227 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4228 		lba = get_unaligned_be32(cmd + 2);
4229 		num_blocks = get_unaligned_be16(cmd + 7);
4230 	} else {				/* SYNCHRONIZE_CACHE(16) */
4231 		lba = get_unaligned_be64(cmd + 2);
4232 		num_blocks = get_unaligned_be32(cmd + 10);
4233 	}
4234 	if (lba + num_blocks > sdebug_capacity) {
4235 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4236 		return check_condition_result;
4237 	}
4238 	if (!write_since_sync || (cmd[1] & 0x2))
4239 		res = SDEG_RES_IMMED_MASK;
4240 	else		/* delay if write_since_sync and IMMED clear */
4241 		write_since_sync = false;
4242 	return res;
4243 }
4244 
4245 /*
4246  * Assuming the LBA+num_blocks is not out-of-range, this function will return
4247  * CONDITION MET if the specified blocks will/have fitted in the cache, and
4248  * a GOOD status otherwise. Model a disk with a big cache and yield
4249  * CONDITION MET. Actually tries to bring range in main memory into the
4250  * cache associated with the CPU(s).
4251  */
resp_pre_fetch(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)4252 static int resp_pre_fetch(struct scsi_cmnd *scp,
4253 			  struct sdebug_dev_info *devip)
4254 {
4255 	int res = 0;
4256 	u64 lba;
4257 	u64 block, rest = 0;
4258 	u32 nblks;
4259 	u8 *cmd = scp->cmnd;
4260 	struct sdeb_store_info *sip = devip2sip(devip, true);
4261 	u8 *fsp = sip->storep;
4262 
4263 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4264 		lba = get_unaligned_be32(cmd + 2);
4265 		nblks = get_unaligned_be16(cmd + 7);
4266 	} else {			/* PRE-FETCH(16) */
4267 		lba = get_unaligned_be64(cmd + 2);
4268 		nblks = get_unaligned_be32(cmd + 10);
4269 	}
4270 	if (lba + nblks > sdebug_capacity) {
4271 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4272 		return check_condition_result;
4273 	}
4274 	if (!fsp)
4275 		goto fini;
4276 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4277 	block = do_div(lba, sdebug_store_sectors);
4278 	if (block + nblks > sdebug_store_sectors)
4279 		rest = block + nblks - sdebug_store_sectors;
4280 
4281 	/* Try to bring the PRE-FETCH range into CPU's cache */
4282 	sdeb_read_lock(sip);
4283 	prefetch_range(fsp + (sdebug_sector_size * block),
4284 		       (nblks - rest) * sdebug_sector_size);
4285 	if (rest)
4286 		prefetch_range(fsp, rest * sdebug_sector_size);
4287 	sdeb_read_unlock(sip);
4288 fini:
4289 	if (cmd[1] & 0x2)
4290 		res = SDEG_RES_IMMED_MASK;
4291 	return res | condition_met_result;
4292 }
4293 
4294 #define RL_BUCKET_ELEMS 8
4295 
4296 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4297  * (W-LUN), the normal Linux scanning logic does not associate it with a
4298  * device (e.g. /dev/sg7). The following magic will make that association:
4299  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4300  * where <n> is a host number. If there are multiple targets in a host then
4301  * the above will associate a W-LUN to each target. To only get a W-LUN
4302  * for target 2, then use "echo '- 2 49409' > scan" .
4303  */
resp_report_luns(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)4304 static int resp_report_luns(struct scsi_cmnd *scp,
4305 			    struct sdebug_dev_info *devip)
4306 {
4307 	unsigned char *cmd = scp->cmnd;
4308 	unsigned int alloc_len;
4309 	unsigned char select_report;
4310 	u64 lun;
4311 	struct scsi_lun *lun_p;
4312 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4313 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4314 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4315 	unsigned int tlun_cnt;	/* total LUN count */
4316 	unsigned int rlen;	/* response length (in bytes) */
4317 	int k, j, n, res;
4318 	unsigned int off_rsp = 0;
4319 	const int sz_lun = sizeof(struct scsi_lun);
4320 
4321 	clear_luns_changed_on_target(devip);
4322 
4323 	select_report = cmd[2];
4324 	alloc_len = get_unaligned_be32(cmd + 6);
4325 
4326 	if (alloc_len < 4) {
4327 		pr_err("alloc len too small %d\n", alloc_len);
4328 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4329 		return check_condition_result;
4330 	}
4331 
4332 	switch (select_report) {
4333 	case 0:		/* all LUNs apart from W-LUNs */
4334 		lun_cnt = sdebug_max_luns;
4335 		wlun_cnt = 0;
4336 		break;
4337 	case 1:		/* only W-LUNs */
4338 		lun_cnt = 0;
4339 		wlun_cnt = 1;
4340 		break;
4341 	case 2:		/* all LUNs */
4342 		lun_cnt = sdebug_max_luns;
4343 		wlun_cnt = 1;
4344 		break;
4345 	case 0x10:	/* only administrative LUs */
4346 	case 0x11:	/* see SPC-5 */
4347 	case 0x12:	/* only subsiduary LUs owned by referenced LU */
4348 	default:
4349 		pr_debug("select report invalid %d\n", select_report);
4350 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4351 		return check_condition_result;
4352 	}
4353 
4354 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4355 		--lun_cnt;
4356 
4357 	tlun_cnt = lun_cnt + wlun_cnt;
4358 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4359 	scsi_set_resid(scp, scsi_bufflen(scp));
4360 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4361 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4362 
4363 	/* loops rely on sizeof response header same as sizeof lun (both 8) */
4364 	lun = sdebug_no_lun_0 ? 1 : 0;
4365 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4366 		memset(arr, 0, sizeof(arr));
4367 		lun_p = (struct scsi_lun *)&arr[0];
4368 		if (k == 0) {
4369 			put_unaligned_be32(rlen, &arr[0]);
4370 			++lun_p;
4371 			j = 1;
4372 		}
4373 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4374 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4375 				break;
4376 			int_to_scsilun(lun++, lun_p);
4377 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4378 				lun_p->scsi_lun[0] |= 0x40;
4379 		}
4380 		if (j < RL_BUCKET_ELEMS)
4381 			break;
4382 		n = j * sz_lun;
4383 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4384 		if (res)
4385 			return res;
4386 		off_rsp += n;
4387 	}
4388 	if (wlun_cnt) {
4389 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4390 		++j;
4391 	}
4392 	if (j > 0)
4393 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4394 	return res;
4395 }
4396 
resp_verify(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)4397 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4398 {
4399 	bool is_bytchk3 = false;
4400 	u8 bytchk;
4401 	int ret, j;
4402 	u32 vnum, a_num, off;
4403 	const u32 lb_size = sdebug_sector_size;
4404 	u64 lba;
4405 	u8 *arr;
4406 	u8 *cmd = scp->cmnd;
4407 	struct sdeb_store_info *sip = devip2sip(devip, true);
4408 
4409 	bytchk = (cmd[1] >> 1) & 0x3;
4410 	if (bytchk == 0) {
4411 		return 0;	/* always claim internal verify okay */
4412 	} else if (bytchk == 2) {
4413 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4414 		return check_condition_result;
4415 	} else if (bytchk == 3) {
4416 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4417 	}
4418 	switch (cmd[0]) {
4419 	case VERIFY_16:
4420 		lba = get_unaligned_be64(cmd + 2);
4421 		vnum = get_unaligned_be32(cmd + 10);
4422 		break;
4423 	case VERIFY:		/* is VERIFY(10) */
4424 		lba = get_unaligned_be32(cmd + 2);
4425 		vnum = get_unaligned_be16(cmd + 7);
4426 		break;
4427 	default:
4428 		mk_sense_invalid_opcode(scp);
4429 		return check_condition_result;
4430 	}
4431 	if (vnum == 0)
4432 		return 0;	/* not an error */
4433 	a_num = is_bytchk3 ? 1 : vnum;
4434 	/* Treat following check like one for read (i.e. no write) access */
4435 	ret = check_device_access_params(scp, lba, a_num, false);
4436 	if (ret)
4437 		return ret;
4438 
4439 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4440 	if (!arr) {
4441 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4442 				INSUFF_RES_ASCQ);
4443 		return check_condition_result;
4444 	}
4445 	/* Not changing store, so only need read access */
4446 	sdeb_read_lock(sip);
4447 
4448 	ret = do_dout_fetch(scp, a_num, arr);
4449 	if (ret == -1) {
4450 		ret = DID_ERROR << 16;
4451 		goto cleanup;
4452 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4453 		sdev_printk(KERN_INFO, scp->device,
4454 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4455 			    my_name, __func__, a_num * lb_size, ret);
4456 	}
4457 	if (is_bytchk3) {
4458 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4459 			memcpy(arr + off, arr, lb_size);
4460 	}
4461 	ret = 0;
4462 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4463 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4464 		ret = check_condition_result;
4465 		goto cleanup;
4466 	}
4467 cleanup:
4468 	sdeb_read_unlock(sip);
4469 	kfree(arr);
4470 	return ret;
4471 }
4472 
4473 #define RZONES_DESC_HD 64
4474 
4475 /* Report zones depending on start LBA and reporting options */
resp_report_zones(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)4476 static int resp_report_zones(struct scsi_cmnd *scp,
4477 			     struct sdebug_dev_info *devip)
4478 {
4479 	unsigned int rep_max_zones, nrz = 0;
4480 	int ret = 0;
4481 	u32 alloc_len, rep_opts, rep_len;
4482 	bool partial;
4483 	u64 lba, zs_lba;
4484 	u8 *arr = NULL, *desc;
4485 	u8 *cmd = scp->cmnd;
4486 	struct sdeb_zone_state *zsp = NULL;
4487 	struct sdeb_store_info *sip = devip2sip(devip, false);
4488 
4489 	if (!sdebug_dev_is_zoned(devip)) {
4490 		mk_sense_invalid_opcode(scp);
4491 		return check_condition_result;
4492 	}
4493 	zs_lba = get_unaligned_be64(cmd + 2);
4494 	alloc_len = get_unaligned_be32(cmd + 10);
4495 	if (alloc_len == 0)
4496 		return 0;	/* not an error */
4497 	rep_opts = cmd[14] & 0x3f;
4498 	partial = cmd[14] & 0x80;
4499 
4500 	if (zs_lba >= sdebug_capacity) {
4501 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4502 		return check_condition_result;
4503 	}
4504 
4505 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4506 
4507 	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4508 	if (!arr) {
4509 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4510 				INSUFF_RES_ASCQ);
4511 		return check_condition_result;
4512 	}
4513 
4514 	sdeb_read_lock(sip);
4515 
4516 	desc = arr + 64;
4517 	for (lba = zs_lba; lba < sdebug_capacity;
4518 	     lba = zsp->z_start + zsp->z_size) {
4519 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4520 			break;
4521 		zsp = zbc_zone(devip, lba);
4522 		switch (rep_opts) {
4523 		case 0x00:
4524 			/* All zones */
4525 			break;
4526 		case 0x01:
4527 			/* Empty zones */
4528 			if (zsp->z_cond != ZC1_EMPTY)
4529 				continue;
4530 			break;
4531 		case 0x02:
4532 			/* Implicit open zones */
4533 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4534 				continue;
4535 			break;
4536 		case 0x03:
4537 			/* Explicit open zones */
4538 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4539 				continue;
4540 			break;
4541 		case 0x04:
4542 			/* Closed zones */
4543 			if (zsp->z_cond != ZC4_CLOSED)
4544 				continue;
4545 			break;
4546 		case 0x05:
4547 			/* Full zones */
4548 			if (zsp->z_cond != ZC5_FULL)
4549 				continue;
4550 			break;
4551 		case 0x06:
4552 		case 0x07:
4553 		case 0x10:
4554 			/*
4555 			 * Read-only, offline, reset WP recommended are
4556 			 * not emulated: no zones to report;
4557 			 */
4558 			continue;
4559 		case 0x11:
4560 			/* non-seq-resource set */
4561 			if (!zsp->z_non_seq_resource)
4562 				continue;
4563 			break;
4564 		case 0x3e:
4565 			/* All zones except gap zones. */
4566 			if (zbc_zone_is_gap(zsp))
4567 				continue;
4568 			break;
4569 		case 0x3f:
4570 			/* Not write pointer (conventional) zones */
4571 			if (zbc_zone_is_seq(zsp))
4572 				continue;
4573 			break;
4574 		default:
4575 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4576 					INVALID_FIELD_IN_CDB, 0);
4577 			ret = check_condition_result;
4578 			goto fini;
4579 		}
4580 
4581 		if (nrz < rep_max_zones) {
4582 			/* Fill zone descriptor */
4583 			desc[0] = zsp->z_type;
4584 			desc[1] = zsp->z_cond << 4;
4585 			if (zsp->z_non_seq_resource)
4586 				desc[1] |= 1 << 1;
4587 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4588 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4589 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4590 			desc += 64;
4591 		}
4592 
4593 		if (partial && nrz >= rep_max_zones)
4594 			break;
4595 
4596 		nrz++;
4597 	}
4598 
4599 	/* Report header */
4600 	/* Zone list length. */
4601 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4602 	/* Maximum LBA */
4603 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4604 	/* Zone starting LBA granularity. */
4605 	if (devip->zcap < devip->zsize)
4606 		put_unaligned_be64(devip->zsize, arr + 16);
4607 
4608 	rep_len = (unsigned long)desc - (unsigned long)arr;
4609 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4610 
4611 fini:
4612 	sdeb_read_unlock(sip);
4613 	kfree(arr);
4614 	return ret;
4615 }
4616 
4617 /* Logic transplanted from tcmu-runner, file_zbc.c */
zbc_open_all(struct sdebug_dev_info * devip)4618 static void zbc_open_all(struct sdebug_dev_info *devip)
4619 {
4620 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4621 	unsigned int i;
4622 
4623 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4624 		if (zsp->z_cond == ZC4_CLOSED)
4625 			zbc_open_zone(devip, &devip->zstate[i], true);
4626 	}
4627 }
4628 
resp_open_zone(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)4629 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4630 {
4631 	int res = 0;
4632 	u64 z_id;
4633 	enum sdebug_z_cond zc;
4634 	u8 *cmd = scp->cmnd;
4635 	struct sdeb_zone_state *zsp;
4636 	bool all = cmd[14] & 0x01;
4637 	struct sdeb_store_info *sip = devip2sip(devip, false);
4638 
4639 	if (!sdebug_dev_is_zoned(devip)) {
4640 		mk_sense_invalid_opcode(scp);
4641 		return check_condition_result;
4642 	}
4643 
4644 	sdeb_write_lock(sip);
4645 
4646 	if (all) {
4647 		/* Check if all closed zones can be open */
4648 		if (devip->max_open &&
4649 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4650 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4651 					INSUFF_ZONE_ASCQ);
4652 			res = check_condition_result;
4653 			goto fini;
4654 		}
4655 		/* Open all closed zones */
4656 		zbc_open_all(devip);
4657 		goto fini;
4658 	}
4659 
4660 	/* Open the specified zone */
4661 	z_id = get_unaligned_be64(cmd + 2);
4662 	if (z_id >= sdebug_capacity) {
4663 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4664 		res = check_condition_result;
4665 		goto fini;
4666 	}
4667 
4668 	zsp = zbc_zone(devip, z_id);
4669 	if (z_id != zsp->z_start) {
4670 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4671 		res = check_condition_result;
4672 		goto fini;
4673 	}
4674 	if (zbc_zone_is_conv(zsp)) {
4675 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4676 		res = check_condition_result;
4677 		goto fini;
4678 	}
4679 
4680 	zc = zsp->z_cond;
4681 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4682 		goto fini;
4683 
4684 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4685 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4686 				INSUFF_ZONE_ASCQ);
4687 		res = check_condition_result;
4688 		goto fini;
4689 	}
4690 
4691 	zbc_open_zone(devip, zsp, true);
4692 fini:
4693 	sdeb_write_unlock(sip);
4694 	return res;
4695 }
4696 
zbc_close_all(struct sdebug_dev_info * devip)4697 static void zbc_close_all(struct sdebug_dev_info *devip)
4698 {
4699 	unsigned int i;
4700 
4701 	for (i = 0; i < devip->nr_zones; i++)
4702 		zbc_close_zone(devip, &devip->zstate[i]);
4703 }
4704 
resp_close_zone(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)4705 static int resp_close_zone(struct scsi_cmnd *scp,
4706 			   struct sdebug_dev_info *devip)
4707 {
4708 	int res = 0;
4709 	u64 z_id;
4710 	u8 *cmd = scp->cmnd;
4711 	struct sdeb_zone_state *zsp;
4712 	bool all = cmd[14] & 0x01;
4713 	struct sdeb_store_info *sip = devip2sip(devip, false);
4714 
4715 	if (!sdebug_dev_is_zoned(devip)) {
4716 		mk_sense_invalid_opcode(scp);
4717 		return check_condition_result;
4718 	}
4719 
4720 	sdeb_write_lock(sip);
4721 
4722 	if (all) {
4723 		zbc_close_all(devip);
4724 		goto fini;
4725 	}
4726 
4727 	/* Close specified zone */
4728 	z_id = get_unaligned_be64(cmd + 2);
4729 	if (z_id >= sdebug_capacity) {
4730 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4731 		res = check_condition_result;
4732 		goto fini;
4733 	}
4734 
4735 	zsp = zbc_zone(devip, z_id);
4736 	if (z_id != zsp->z_start) {
4737 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4738 		res = check_condition_result;
4739 		goto fini;
4740 	}
4741 	if (zbc_zone_is_conv(zsp)) {
4742 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4743 		res = check_condition_result;
4744 		goto fini;
4745 	}
4746 
4747 	zbc_close_zone(devip, zsp);
4748 fini:
4749 	sdeb_write_unlock(sip);
4750 	return res;
4751 }
4752 
zbc_finish_zone(struct sdebug_dev_info * devip,struct sdeb_zone_state * zsp,bool empty)4753 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4754 			    struct sdeb_zone_state *zsp, bool empty)
4755 {
4756 	enum sdebug_z_cond zc = zsp->z_cond;
4757 
4758 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4759 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4760 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4761 			zbc_close_zone(devip, zsp);
4762 		if (zsp->z_cond == ZC4_CLOSED)
4763 			devip->nr_closed--;
4764 		zsp->z_wp = zsp->z_start + zsp->z_size;
4765 		zsp->z_cond = ZC5_FULL;
4766 	}
4767 }
4768 
zbc_finish_all(struct sdebug_dev_info * devip)4769 static void zbc_finish_all(struct sdebug_dev_info *devip)
4770 {
4771 	unsigned int i;
4772 
4773 	for (i = 0; i < devip->nr_zones; i++)
4774 		zbc_finish_zone(devip, &devip->zstate[i], false);
4775 }
4776 
resp_finish_zone(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)4777 static int resp_finish_zone(struct scsi_cmnd *scp,
4778 			    struct sdebug_dev_info *devip)
4779 {
4780 	struct sdeb_zone_state *zsp;
4781 	int res = 0;
4782 	u64 z_id;
4783 	u8 *cmd = scp->cmnd;
4784 	bool all = cmd[14] & 0x01;
4785 	struct sdeb_store_info *sip = devip2sip(devip, false);
4786 
4787 	if (!sdebug_dev_is_zoned(devip)) {
4788 		mk_sense_invalid_opcode(scp);
4789 		return check_condition_result;
4790 	}
4791 
4792 	sdeb_write_lock(sip);
4793 
4794 	if (all) {
4795 		zbc_finish_all(devip);
4796 		goto fini;
4797 	}
4798 
4799 	/* Finish the specified zone */
4800 	z_id = get_unaligned_be64(cmd + 2);
4801 	if (z_id >= sdebug_capacity) {
4802 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4803 		res = check_condition_result;
4804 		goto fini;
4805 	}
4806 
4807 	zsp = zbc_zone(devip, z_id);
4808 	if (z_id != zsp->z_start) {
4809 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4810 		res = check_condition_result;
4811 		goto fini;
4812 	}
4813 	if (zbc_zone_is_conv(zsp)) {
4814 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4815 		res = check_condition_result;
4816 		goto fini;
4817 	}
4818 
4819 	zbc_finish_zone(devip, zsp, true);
4820 fini:
4821 	sdeb_write_unlock(sip);
4822 	return res;
4823 }
4824 
zbc_rwp_zone(struct sdebug_dev_info * devip,struct sdeb_zone_state * zsp)4825 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4826 			 struct sdeb_zone_state *zsp)
4827 {
4828 	enum sdebug_z_cond zc;
4829 	struct sdeb_store_info *sip = devip2sip(devip, false);
4830 
4831 	if (!zbc_zone_is_seq(zsp))
4832 		return;
4833 
4834 	zc = zsp->z_cond;
4835 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4836 		zbc_close_zone(devip, zsp);
4837 
4838 	if (zsp->z_cond == ZC4_CLOSED)
4839 		devip->nr_closed--;
4840 
4841 	if (zsp->z_wp > zsp->z_start)
4842 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4843 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4844 
4845 	zsp->z_non_seq_resource = false;
4846 	zsp->z_wp = zsp->z_start;
4847 	zsp->z_cond = ZC1_EMPTY;
4848 }
4849 
zbc_rwp_all(struct sdebug_dev_info * devip)4850 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4851 {
4852 	unsigned int i;
4853 
4854 	for (i = 0; i < devip->nr_zones; i++)
4855 		zbc_rwp_zone(devip, &devip->zstate[i]);
4856 }
4857 
resp_rwp_zone(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)4858 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4859 {
4860 	struct sdeb_zone_state *zsp;
4861 	int res = 0;
4862 	u64 z_id;
4863 	u8 *cmd = scp->cmnd;
4864 	bool all = cmd[14] & 0x01;
4865 	struct sdeb_store_info *sip = devip2sip(devip, false);
4866 
4867 	if (!sdebug_dev_is_zoned(devip)) {
4868 		mk_sense_invalid_opcode(scp);
4869 		return check_condition_result;
4870 	}
4871 
4872 	sdeb_write_lock(sip);
4873 
4874 	if (all) {
4875 		zbc_rwp_all(devip);
4876 		goto fini;
4877 	}
4878 
4879 	z_id = get_unaligned_be64(cmd + 2);
4880 	if (z_id >= sdebug_capacity) {
4881 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4882 		res = check_condition_result;
4883 		goto fini;
4884 	}
4885 
4886 	zsp = zbc_zone(devip, z_id);
4887 	if (z_id != zsp->z_start) {
4888 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4889 		res = check_condition_result;
4890 		goto fini;
4891 	}
4892 	if (zbc_zone_is_conv(zsp)) {
4893 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4894 		res = check_condition_result;
4895 		goto fini;
4896 	}
4897 
4898 	zbc_rwp_zone(devip, zsp);
4899 fini:
4900 	sdeb_write_unlock(sip);
4901 	return res;
4902 }
4903 
get_queue(struct scsi_cmnd * cmnd)4904 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4905 {
4906 	u16 hwq;
4907 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4908 
4909 	hwq = blk_mq_unique_tag_to_hwq(tag);
4910 
4911 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4912 	if (WARN_ON_ONCE(hwq >= submit_queues))
4913 		hwq = 0;
4914 
4915 	return sdebug_q_arr + hwq;
4916 }
4917 
get_tag(struct scsi_cmnd * cmnd)4918 static u32 get_tag(struct scsi_cmnd *cmnd)
4919 {
4920 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4921 }
4922 
4923 /* Queued (deferred) command completions converge here. */
sdebug_q_cmd_complete(struct sdebug_defer * sd_dp)4924 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4925 {
4926 	bool aborted = sd_dp->aborted;
4927 	int qc_idx;
4928 	int retiring = 0;
4929 	unsigned long iflags;
4930 	struct sdebug_queue *sqp;
4931 	struct sdebug_queued_cmd *sqcp;
4932 	struct scsi_cmnd *scp;
4933 	struct sdebug_dev_info *devip;
4934 
4935 	if (unlikely(aborted))
4936 		sd_dp->aborted = false;
4937 	qc_idx = sd_dp->qc_idx;
4938 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4939 	if (sdebug_statistics) {
4940 		atomic_inc(&sdebug_completions);
4941 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4942 			atomic_inc(&sdebug_miss_cpus);
4943 	}
4944 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4945 		pr_err("wild qc_idx=%d\n", qc_idx);
4946 		return;
4947 	}
4948 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4949 	WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
4950 	sqcp = &sqp->qc_arr[qc_idx];
4951 	scp = sqcp->a_cmnd;
4952 	if (unlikely(scp == NULL)) {
4953 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4954 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4955 		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4956 		return;
4957 	}
4958 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
4959 	if (likely(devip))
4960 		atomic_dec(&devip->num_in_q);
4961 	else
4962 		pr_err("devip=NULL\n");
4963 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4964 		retiring = 1;
4965 
4966 	sqcp->a_cmnd = NULL;
4967 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4968 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4969 		pr_err("Unexpected completion\n");
4970 		return;
4971 	}
4972 
4973 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4974 		int k, retval;
4975 
4976 		retval = atomic_read(&retired_max_queue);
4977 		if (qc_idx >= retval) {
4978 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4979 			pr_err("index %d too large\n", retval);
4980 			return;
4981 		}
4982 		k = find_last_bit(sqp->in_use_bm, retval);
4983 		if ((k < sdebug_max_queue) || (k == retval))
4984 			atomic_set(&retired_max_queue, 0);
4985 		else
4986 			atomic_set(&retired_max_queue, k + 1);
4987 	}
4988 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4989 	if (unlikely(aborted)) {
4990 		if (sdebug_verbose)
4991 			pr_info("bypassing scsi_done() due to aborted cmd\n");
4992 		return;
4993 	}
4994 	scsi_done(scp); /* callback to mid level */
4995 }
4996 
4997 /* When high resolution timer goes off this function is called. */
sdebug_q_cmd_hrt_complete(struct hrtimer * timer)4998 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4999 {
5000 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5001 						  hrt);
5002 	sdebug_q_cmd_complete(sd_dp);
5003 	return HRTIMER_NORESTART;
5004 }
5005 
5006 /* When work queue schedules work, it calls this function. */
sdebug_q_cmd_wq_complete(struct work_struct * work)5007 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5008 {
5009 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5010 						  ew.work);
5011 	sdebug_q_cmd_complete(sd_dp);
5012 }
5013 
5014 static bool got_shared_uuid;
5015 static uuid_t shared_uuid;
5016 
sdebug_device_create_zones(struct sdebug_dev_info * devip)5017 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5018 {
5019 	struct sdeb_zone_state *zsp;
5020 	sector_t capacity = get_sdebug_capacity();
5021 	sector_t conv_capacity;
5022 	sector_t zstart = 0;
5023 	unsigned int i;
5024 
5025 	/*
5026 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5027 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
5028 	 * use the specified zone size checking that at least 2 zones can be
5029 	 * created for the device.
5030 	 */
5031 	if (!sdeb_zbc_zone_size_mb) {
5032 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5033 			>> ilog2(sdebug_sector_size);
5034 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5035 			devip->zsize >>= 1;
5036 		if (devip->zsize < 2) {
5037 			pr_err("Device capacity too small\n");
5038 			return -EINVAL;
5039 		}
5040 	} else {
5041 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5042 			pr_err("Zone size is not a power of 2\n");
5043 			return -EINVAL;
5044 		}
5045 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5046 			>> ilog2(sdebug_sector_size);
5047 		if (devip->zsize >= capacity) {
5048 			pr_err("Zone size too large for device capacity\n");
5049 			return -EINVAL;
5050 		}
5051 	}
5052 
5053 	devip->zsize_shift = ilog2(devip->zsize);
5054 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5055 
5056 	if (sdeb_zbc_zone_cap_mb == 0) {
5057 		devip->zcap = devip->zsize;
5058 	} else {
5059 		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5060 			      ilog2(sdebug_sector_size);
5061 		if (devip->zcap > devip->zsize) {
5062 			pr_err("Zone capacity too large\n");
5063 			return -EINVAL;
5064 		}
5065 	}
5066 
5067 	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5068 	if (conv_capacity >= capacity) {
5069 		pr_err("Number of conventional zones too large\n");
5070 		return -EINVAL;
5071 	}
5072 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
5073 	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5074 			      devip->zsize_shift;
5075 	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5076 
5077 	/* Add gap zones if zone capacity is smaller than the zone size */
5078 	if (devip->zcap < devip->zsize)
5079 		devip->nr_zones += devip->nr_seq_zones;
5080 
5081 	if (devip->zmodel == BLK_ZONED_HM) {
5082 		/* zbc_max_open_zones can be 0, meaning "not reported" */
5083 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5084 			devip->max_open = (devip->nr_zones - 1) / 2;
5085 		else
5086 			devip->max_open = sdeb_zbc_max_open;
5087 	}
5088 
5089 	devip->zstate = kcalloc(devip->nr_zones,
5090 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
5091 	if (!devip->zstate)
5092 		return -ENOMEM;
5093 
5094 	for (i = 0; i < devip->nr_zones; i++) {
5095 		zsp = &devip->zstate[i];
5096 
5097 		zsp->z_start = zstart;
5098 
5099 		if (i < devip->nr_conv_zones) {
5100 			zsp->z_type = ZBC_ZTYPE_CNV;
5101 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5102 			zsp->z_wp = (sector_t)-1;
5103 			zsp->z_size =
5104 				min_t(u64, devip->zsize, capacity - zstart);
5105 		} else if ((zstart & (devip->zsize - 1)) == 0) {
5106 			if (devip->zmodel == BLK_ZONED_HM)
5107 				zsp->z_type = ZBC_ZTYPE_SWR;
5108 			else
5109 				zsp->z_type = ZBC_ZTYPE_SWP;
5110 			zsp->z_cond = ZC1_EMPTY;
5111 			zsp->z_wp = zsp->z_start;
5112 			zsp->z_size =
5113 				min_t(u64, devip->zcap, capacity - zstart);
5114 		} else {
5115 			zsp->z_type = ZBC_ZTYPE_GAP;
5116 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5117 			zsp->z_wp = (sector_t)-1;
5118 			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5119 					    capacity - zstart);
5120 		}
5121 
5122 		WARN_ON_ONCE((int)zsp->z_size <= 0);
5123 		zstart += zsp->z_size;
5124 	}
5125 
5126 	return 0;
5127 }
5128 
sdebug_device_create(struct sdebug_host_info * sdbg_host,gfp_t flags)5129 static struct sdebug_dev_info *sdebug_device_create(
5130 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5131 {
5132 	struct sdebug_dev_info *devip;
5133 
5134 	devip = kzalloc(sizeof(*devip), flags);
5135 	if (devip) {
5136 		if (sdebug_uuid_ctl == 1)
5137 			uuid_gen(&devip->lu_name);
5138 		else if (sdebug_uuid_ctl == 2) {
5139 			if (got_shared_uuid)
5140 				devip->lu_name = shared_uuid;
5141 			else {
5142 				uuid_gen(&shared_uuid);
5143 				got_shared_uuid = true;
5144 				devip->lu_name = shared_uuid;
5145 			}
5146 		}
5147 		devip->sdbg_host = sdbg_host;
5148 		if (sdeb_zbc_in_use) {
5149 			devip->zmodel = sdeb_zbc_model;
5150 			if (sdebug_device_create_zones(devip)) {
5151 				kfree(devip);
5152 				return NULL;
5153 			}
5154 		} else {
5155 			devip->zmodel = BLK_ZONED_NONE;
5156 		}
5157 		devip->sdbg_host = sdbg_host;
5158 		devip->create_ts = ktime_get_boottime();
5159 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5160 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5161 	}
5162 	return devip;
5163 }
5164 
find_build_dev_info(struct scsi_device * sdev)5165 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5166 {
5167 	struct sdebug_host_info *sdbg_host;
5168 	struct sdebug_dev_info *open_devip = NULL;
5169 	struct sdebug_dev_info *devip;
5170 
5171 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
5172 	if (!sdbg_host) {
5173 		pr_err("Host info NULL\n");
5174 		return NULL;
5175 	}
5176 
5177 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5178 		if ((devip->used) && (devip->channel == sdev->channel) &&
5179 		    (devip->target == sdev->id) &&
5180 		    (devip->lun == sdev->lun))
5181 			return devip;
5182 		else {
5183 			if ((!devip->used) && (!open_devip))
5184 				open_devip = devip;
5185 		}
5186 	}
5187 	if (!open_devip) { /* try and make a new one */
5188 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5189 		if (!open_devip) {
5190 			pr_err("out of memory at line %d\n", __LINE__);
5191 			return NULL;
5192 		}
5193 	}
5194 
5195 	open_devip->channel = sdev->channel;
5196 	open_devip->target = sdev->id;
5197 	open_devip->lun = sdev->lun;
5198 	open_devip->sdbg_host = sdbg_host;
5199 	atomic_set(&open_devip->num_in_q, 0);
5200 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5201 	open_devip->used = true;
5202 	return open_devip;
5203 }
5204 
scsi_debug_slave_alloc(struct scsi_device * sdp)5205 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5206 {
5207 	if (sdebug_verbose)
5208 		pr_info("slave_alloc <%u %u %u %llu>\n",
5209 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5210 	return 0;
5211 }
5212 
scsi_debug_slave_configure(struct scsi_device * sdp)5213 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5214 {
5215 	struct sdebug_dev_info *devip =
5216 			(struct sdebug_dev_info *)sdp->hostdata;
5217 
5218 	if (sdebug_verbose)
5219 		pr_info("slave_configure <%u %u %u %llu>\n",
5220 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5221 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5222 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5223 	if (devip == NULL) {
5224 		devip = find_build_dev_info(sdp);
5225 		if (devip == NULL)
5226 			return 1;  /* no resources, will be marked offline */
5227 	}
5228 	sdp->hostdata = devip;
5229 	if (sdebug_no_uld)
5230 		sdp->no_uld_attach = 1;
5231 	config_cdb_len(sdp);
5232 	return 0;
5233 }
5234 
scsi_debug_slave_destroy(struct scsi_device * sdp)5235 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5236 {
5237 	struct sdebug_dev_info *devip =
5238 		(struct sdebug_dev_info *)sdp->hostdata;
5239 
5240 	if (sdebug_verbose)
5241 		pr_info("slave_destroy <%u %u %u %llu>\n",
5242 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5243 	if (devip) {
5244 		/* make this slot available for re-use */
5245 		devip->used = false;
5246 		sdp->hostdata = NULL;
5247 	}
5248 }
5249 
stop_qc_helper(struct sdebug_defer * sd_dp,enum sdeb_defer_type defer_t)5250 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5251 			   enum sdeb_defer_type defer_t)
5252 {
5253 	if (!sd_dp)
5254 		return;
5255 	if (defer_t == SDEB_DEFER_HRT)
5256 		hrtimer_cancel(&sd_dp->hrt);
5257 	else if (defer_t == SDEB_DEFER_WQ)
5258 		cancel_work_sync(&sd_dp->ew.work);
5259 }
5260 
5261 /* If @cmnd found deletes its timer or work queue and returns true; else
5262    returns false */
stop_queued_cmnd(struct scsi_cmnd * cmnd)5263 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5264 {
5265 	unsigned long iflags;
5266 	int j, k, qmax, r_qmax;
5267 	enum sdeb_defer_type l_defer_t;
5268 	struct sdebug_queue *sqp;
5269 	struct sdebug_queued_cmd *sqcp;
5270 	struct sdebug_dev_info *devip;
5271 	struct sdebug_defer *sd_dp;
5272 
5273 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5274 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5275 		qmax = sdebug_max_queue;
5276 		r_qmax = atomic_read(&retired_max_queue);
5277 		if (r_qmax > qmax)
5278 			qmax = r_qmax;
5279 		for (k = 0; k < qmax; ++k) {
5280 			if (test_bit(k, sqp->in_use_bm)) {
5281 				sqcp = &sqp->qc_arr[k];
5282 				if (cmnd != sqcp->a_cmnd)
5283 					continue;
5284 				/* found */
5285 				devip = (struct sdebug_dev_info *)
5286 						cmnd->device->hostdata;
5287 				if (devip)
5288 					atomic_dec(&devip->num_in_q);
5289 				sqcp->a_cmnd = NULL;
5290 				sd_dp = sqcp->sd_dp;
5291 				if (sd_dp) {
5292 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5293 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5294 				} else
5295 					l_defer_t = SDEB_DEFER_NONE;
5296 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5297 				stop_qc_helper(sd_dp, l_defer_t);
5298 				clear_bit(k, sqp->in_use_bm);
5299 				return true;
5300 			}
5301 		}
5302 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5303 	}
5304 	return false;
5305 }
5306 
5307 /* Deletes (stops) timers or work queues of all queued commands */
stop_all_queued(void)5308 static void stop_all_queued(void)
5309 {
5310 	unsigned long iflags;
5311 	int j, k;
5312 	enum sdeb_defer_type l_defer_t;
5313 	struct sdebug_queue *sqp;
5314 	struct sdebug_queued_cmd *sqcp;
5315 	struct sdebug_dev_info *devip;
5316 	struct sdebug_defer *sd_dp;
5317 
5318 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5319 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5320 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5321 			if (test_bit(k, sqp->in_use_bm)) {
5322 				sqcp = &sqp->qc_arr[k];
5323 				if (sqcp->a_cmnd == NULL)
5324 					continue;
5325 				devip = (struct sdebug_dev_info *)
5326 					sqcp->a_cmnd->device->hostdata;
5327 				if (devip)
5328 					atomic_dec(&devip->num_in_q);
5329 				sqcp->a_cmnd = NULL;
5330 				sd_dp = sqcp->sd_dp;
5331 				if (sd_dp) {
5332 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5333 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5334 				} else
5335 					l_defer_t = SDEB_DEFER_NONE;
5336 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5337 				stop_qc_helper(sd_dp, l_defer_t);
5338 				clear_bit(k, sqp->in_use_bm);
5339 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5340 			}
5341 		}
5342 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5343 	}
5344 }
5345 
5346 /* Free queued command memory on heap */
free_all_queued(void)5347 static void free_all_queued(void)
5348 {
5349 	int j, k;
5350 	struct sdebug_queue *sqp;
5351 	struct sdebug_queued_cmd *sqcp;
5352 
5353 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5354 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5355 			sqcp = &sqp->qc_arr[k];
5356 			kfree(sqcp->sd_dp);
5357 			sqcp->sd_dp = NULL;
5358 		}
5359 	}
5360 }
5361 
scsi_debug_abort(struct scsi_cmnd * SCpnt)5362 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5363 {
5364 	bool ok;
5365 
5366 	++num_aborts;
5367 	if (SCpnt) {
5368 		ok = stop_queued_cmnd(SCpnt);
5369 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5370 			sdev_printk(KERN_INFO, SCpnt->device,
5371 				    "%s: command%s found\n", __func__,
5372 				    ok ? "" : " not");
5373 	}
5374 	return SUCCESS;
5375 }
5376 
scsi_debug_device_reset(struct scsi_cmnd * SCpnt)5377 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5378 {
5379 	++num_dev_resets;
5380 	if (SCpnt && SCpnt->device) {
5381 		struct scsi_device *sdp = SCpnt->device;
5382 		struct sdebug_dev_info *devip =
5383 				(struct sdebug_dev_info *)sdp->hostdata;
5384 
5385 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5386 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5387 		if (devip)
5388 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5389 	}
5390 	return SUCCESS;
5391 }
5392 
scsi_debug_target_reset(struct scsi_cmnd * SCpnt)5393 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5394 {
5395 	struct sdebug_host_info *sdbg_host;
5396 	struct sdebug_dev_info *devip;
5397 	struct scsi_device *sdp;
5398 	struct Scsi_Host *hp;
5399 	int k = 0;
5400 
5401 	++num_target_resets;
5402 	if (!SCpnt)
5403 		goto lie;
5404 	sdp = SCpnt->device;
5405 	if (!sdp)
5406 		goto lie;
5407 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5408 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5409 	hp = sdp->host;
5410 	if (!hp)
5411 		goto lie;
5412 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5413 	if (sdbg_host) {
5414 		list_for_each_entry(devip,
5415 				    &sdbg_host->dev_info_list,
5416 				    dev_list)
5417 			if (devip->target == sdp->id) {
5418 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5419 				++k;
5420 			}
5421 	}
5422 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5423 		sdev_printk(KERN_INFO, sdp,
5424 			    "%s: %d device(s) found in target\n", __func__, k);
5425 lie:
5426 	return SUCCESS;
5427 }
5428 
scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)5429 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5430 {
5431 	struct sdebug_host_info *sdbg_host;
5432 	struct sdebug_dev_info *devip;
5433 	struct scsi_device *sdp;
5434 	struct Scsi_Host *hp;
5435 	int k = 0;
5436 
5437 	++num_bus_resets;
5438 	if (!(SCpnt && SCpnt->device))
5439 		goto lie;
5440 	sdp = SCpnt->device;
5441 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5442 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5443 	hp = sdp->host;
5444 	if (hp) {
5445 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5446 		if (sdbg_host) {
5447 			list_for_each_entry(devip,
5448 					    &sdbg_host->dev_info_list,
5449 					    dev_list) {
5450 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5451 				++k;
5452 			}
5453 		}
5454 	}
5455 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5456 		sdev_printk(KERN_INFO, sdp,
5457 			    "%s: %d device(s) found in host\n", __func__, k);
5458 lie:
5459 	return SUCCESS;
5460 }
5461 
scsi_debug_host_reset(struct scsi_cmnd * SCpnt)5462 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5463 {
5464 	struct sdebug_host_info *sdbg_host;
5465 	struct sdebug_dev_info *devip;
5466 	int k = 0;
5467 
5468 	++num_host_resets;
5469 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5470 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5471 	spin_lock(&sdebug_host_list_lock);
5472 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5473 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5474 				    dev_list) {
5475 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5476 			++k;
5477 		}
5478 	}
5479 	spin_unlock(&sdebug_host_list_lock);
5480 	stop_all_queued();
5481 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5482 		sdev_printk(KERN_INFO, SCpnt->device,
5483 			    "%s: %d device(s) found\n", __func__, k);
5484 	return SUCCESS;
5485 }
5486 
sdebug_build_parts(unsigned char * ramp,unsigned long store_size)5487 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5488 {
5489 	struct msdos_partition *pp;
5490 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5491 	int sectors_per_part, num_sectors, k;
5492 	int heads_by_sects, start_sec, end_sec;
5493 
5494 	/* assume partition table already zeroed */
5495 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5496 		return;
5497 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5498 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5499 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5500 	}
5501 	num_sectors = (int)get_sdebug_capacity();
5502 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5503 			   / sdebug_num_parts;
5504 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5505 	starts[0] = sdebug_sectors_per;
5506 	max_part_secs = sectors_per_part;
5507 	for (k = 1; k < sdebug_num_parts; ++k) {
5508 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5509 			    * heads_by_sects;
5510 		if (starts[k] - starts[k - 1] < max_part_secs)
5511 			max_part_secs = starts[k] - starts[k - 1];
5512 	}
5513 	starts[sdebug_num_parts] = num_sectors;
5514 	starts[sdebug_num_parts + 1] = 0;
5515 
5516 	ramp[510] = 0x55;	/* magic partition markings */
5517 	ramp[511] = 0xAA;
5518 	pp = (struct msdos_partition *)(ramp + 0x1be);
5519 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5520 		start_sec = starts[k];
5521 		end_sec = starts[k] + max_part_secs - 1;
5522 		pp->boot_ind = 0;
5523 
5524 		pp->cyl = start_sec / heads_by_sects;
5525 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5526 			   / sdebug_sectors_per;
5527 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5528 
5529 		pp->end_cyl = end_sec / heads_by_sects;
5530 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5531 			       / sdebug_sectors_per;
5532 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5533 
5534 		pp->start_sect = cpu_to_le32(start_sec);
5535 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5536 		pp->sys_ind = 0x83;	/* plain Linux partition */
5537 	}
5538 }
5539 
block_unblock_all_queues(bool block)5540 static void block_unblock_all_queues(bool block)
5541 {
5542 	int j;
5543 	struct sdebug_queue *sqp;
5544 
5545 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5546 		atomic_set(&sqp->blocked, (int)block);
5547 }
5548 
5549 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5550  * commands will be processed normally before triggers occur.
5551  */
tweak_cmnd_count(void)5552 static void tweak_cmnd_count(void)
5553 {
5554 	int count, modulo;
5555 
5556 	modulo = abs(sdebug_every_nth);
5557 	if (modulo < 2)
5558 		return;
5559 	block_unblock_all_queues(true);
5560 	count = atomic_read(&sdebug_cmnd_count);
5561 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5562 	block_unblock_all_queues(false);
5563 }
5564 
clear_queue_stats(void)5565 static void clear_queue_stats(void)
5566 {
5567 	atomic_set(&sdebug_cmnd_count, 0);
5568 	atomic_set(&sdebug_completions, 0);
5569 	atomic_set(&sdebug_miss_cpus, 0);
5570 	atomic_set(&sdebug_a_tsf, 0);
5571 }
5572 
inject_on_this_cmd(void)5573 static bool inject_on_this_cmd(void)
5574 {
5575 	if (sdebug_every_nth == 0)
5576 		return false;
5577 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5578 }
5579 
5580 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5581 
5582 /* Complete the processing of the thread that queued a SCSI command to this
5583  * driver. It either completes the command by calling cmnd_done() or
5584  * schedules a hr timer or work queue then returns 0. Returns
5585  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5586  */
schedule_resp(struct scsi_cmnd * cmnd,struct sdebug_dev_info * devip,int scsi_result,int (* pfp)(struct scsi_cmnd *,struct sdebug_dev_info *),int delta_jiff,int ndelay)5587 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5588 			 int scsi_result,
5589 			 int (*pfp)(struct scsi_cmnd *,
5590 				    struct sdebug_dev_info *),
5591 			 int delta_jiff, int ndelay)
5592 {
5593 	bool new_sd_dp;
5594 	bool inject = false;
5595 	bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
5596 	int k, num_in_q, qdepth;
5597 	unsigned long iflags;
5598 	u64 ns_from_boot = 0;
5599 	struct sdebug_queue *sqp;
5600 	struct sdebug_queued_cmd *sqcp;
5601 	struct scsi_device *sdp;
5602 	struct sdebug_defer *sd_dp;
5603 
5604 	if (unlikely(devip == NULL)) {
5605 		if (scsi_result == 0)
5606 			scsi_result = DID_NO_CONNECT << 16;
5607 		goto respond_in_thread;
5608 	}
5609 	sdp = cmnd->device;
5610 
5611 	if (delta_jiff == 0)
5612 		goto respond_in_thread;
5613 
5614 	sqp = get_queue(cmnd);
5615 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5616 	if (unlikely(atomic_read(&sqp->blocked))) {
5617 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5618 		return SCSI_MLQUEUE_HOST_BUSY;
5619 	}
5620 	num_in_q = atomic_read(&devip->num_in_q);
5621 	qdepth = cmnd->device->queue_depth;
5622 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5623 		if (scsi_result) {
5624 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5625 			goto respond_in_thread;
5626 		} else
5627 			scsi_result = device_qfull_result;
5628 	} else if (unlikely(sdebug_every_nth &&
5629 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5630 			    (scsi_result == 0))) {
5631 		if ((num_in_q == (qdepth - 1)) &&
5632 		    (atomic_inc_return(&sdebug_a_tsf) >=
5633 		     abs(sdebug_every_nth))) {
5634 			atomic_set(&sdebug_a_tsf, 0);
5635 			inject = true;
5636 			scsi_result = device_qfull_result;
5637 		}
5638 	}
5639 
5640 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5641 	if (unlikely(k >= sdebug_max_queue)) {
5642 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5643 		if (scsi_result)
5644 			goto respond_in_thread;
5645 		scsi_result = device_qfull_result;
5646 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5647 			sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
5648 				    __func__, sdebug_max_queue);
5649 		goto respond_in_thread;
5650 	}
5651 	set_bit(k, sqp->in_use_bm);
5652 	atomic_inc(&devip->num_in_q);
5653 	sqcp = &sqp->qc_arr[k];
5654 	sqcp->a_cmnd = cmnd;
5655 	cmnd->host_scribble = (unsigned char *)sqcp;
5656 	sd_dp = sqcp->sd_dp;
5657 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5658 
5659 	if (!sd_dp) {
5660 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5661 		if (!sd_dp) {
5662 			atomic_dec(&devip->num_in_q);
5663 			clear_bit(k, sqp->in_use_bm);
5664 			return SCSI_MLQUEUE_HOST_BUSY;
5665 		}
5666 		new_sd_dp = true;
5667 	} else {
5668 		new_sd_dp = false;
5669 	}
5670 
5671 	/* Set the hostwide tag */
5672 	if (sdebug_host_max_queue)
5673 		sd_dp->hc_idx = get_tag(cmnd);
5674 
5675 	if (polled)
5676 		ns_from_boot = ktime_get_boottime_ns();
5677 
5678 	/* one of the resp_*() response functions is called here */
5679 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5680 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5681 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5682 		delta_jiff = ndelay = 0;
5683 	}
5684 	if (cmnd->result == 0 && scsi_result != 0)
5685 		cmnd->result = scsi_result;
5686 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5687 		if (atomic_read(&sdeb_inject_pending)) {
5688 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5689 			atomic_set(&sdeb_inject_pending, 0);
5690 			cmnd->result = check_condition_result;
5691 		}
5692 	}
5693 
5694 	if (unlikely(sdebug_verbose && cmnd->result))
5695 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5696 			    __func__, cmnd->result);
5697 
5698 	if (delta_jiff > 0 || ndelay > 0) {
5699 		ktime_t kt;
5700 
5701 		if (delta_jiff > 0) {
5702 			u64 ns = jiffies_to_nsecs(delta_jiff);
5703 
5704 			if (sdebug_random && ns < U32_MAX) {
5705 				ns = prandom_u32_max((u32)ns);
5706 			} else if (sdebug_random) {
5707 				ns >>= 12;	/* scale to 4 usec precision */
5708 				if (ns < U32_MAX)	/* over 4 hours max */
5709 					ns = prandom_u32_max((u32)ns);
5710 				ns <<= 12;
5711 			}
5712 			kt = ns_to_ktime(ns);
5713 		} else {	/* ndelay has a 4.2 second max */
5714 			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5715 					     (u32)ndelay;
5716 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5717 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5718 
5719 				if (kt <= d) {	/* elapsed duration >= kt */
5720 					spin_lock_irqsave(&sqp->qc_lock, iflags);
5721 					sqcp->a_cmnd = NULL;
5722 					atomic_dec(&devip->num_in_q);
5723 					clear_bit(k, sqp->in_use_bm);
5724 					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5725 					if (new_sd_dp)
5726 						kfree(sd_dp);
5727 					/* call scsi_done() from this thread */
5728 					scsi_done(cmnd);
5729 					return 0;
5730 				}
5731 				/* otherwise reduce kt by elapsed time */
5732 				kt -= d;
5733 			}
5734 		}
5735 		if (polled) {
5736 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5737 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5738 			if (!sd_dp->init_poll) {
5739 				sd_dp->init_poll = true;
5740 				sqcp->sd_dp = sd_dp;
5741 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5742 				sd_dp->qc_idx = k;
5743 			}
5744 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5745 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5746 		} else {
5747 			if (!sd_dp->init_hrt) {
5748 				sd_dp->init_hrt = true;
5749 				sqcp->sd_dp = sd_dp;
5750 				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5751 					     HRTIMER_MODE_REL_PINNED);
5752 				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5753 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5754 				sd_dp->qc_idx = k;
5755 			}
5756 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5757 			/* schedule the invocation of scsi_done() for a later time */
5758 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5759 		}
5760 		if (sdebug_statistics)
5761 			sd_dp->issuing_cpu = raw_smp_processor_id();
5762 	} else {	/* jdelay < 0, use work queue */
5763 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5764 			     atomic_read(&sdeb_inject_pending)))
5765 			sd_dp->aborted = true;
5766 		if (polled) {
5767 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5768 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5769 			if (!sd_dp->init_poll) {
5770 				sd_dp->init_poll = true;
5771 				sqcp->sd_dp = sd_dp;
5772 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5773 				sd_dp->qc_idx = k;
5774 			}
5775 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5776 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5777 		} else {
5778 			if (!sd_dp->init_wq) {
5779 				sd_dp->init_wq = true;
5780 				sqcp->sd_dp = sd_dp;
5781 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5782 				sd_dp->qc_idx = k;
5783 				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5784 			}
5785 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5786 			schedule_work(&sd_dp->ew.work);
5787 		}
5788 		if (sdebug_statistics)
5789 			sd_dp->issuing_cpu = raw_smp_processor_id();
5790 		if (unlikely(sd_dp->aborted)) {
5791 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5792 				    scsi_cmd_to_rq(cmnd)->tag);
5793 			blk_abort_request(scsi_cmd_to_rq(cmnd));
5794 			atomic_set(&sdeb_inject_pending, 0);
5795 			sd_dp->aborted = false;
5796 		}
5797 	}
5798 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5799 		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5800 			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5801 	return 0;
5802 
5803 respond_in_thread:	/* call back to mid-layer using invocation thread */
5804 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5805 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5806 	if (cmnd->result == 0 && scsi_result != 0)
5807 		cmnd->result = scsi_result;
5808 	scsi_done(cmnd);
5809 	return 0;
5810 }
5811 
5812 /* Note: The following macros create attribute files in the
5813    /sys/module/scsi_debug/parameters directory. Unfortunately this
5814    driver is unaware of a change and cannot trigger auxiliary actions
5815    as it can when the corresponding attribute in the
5816    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5817  */
5818 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5819 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5820 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5821 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5822 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5823 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5824 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5825 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5826 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5827 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5828 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5829 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5830 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5831 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5832 module_param_string(inq_product, sdebug_inq_product_id,
5833 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5834 module_param_string(inq_rev, sdebug_inq_product_rev,
5835 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5836 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5837 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5838 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5839 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5840 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5841 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5842 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5843 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5844 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5845 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5846 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5847 		   S_IRUGO | S_IWUSR);
5848 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5849 		   S_IRUGO | S_IWUSR);
5850 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5851 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5852 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5853 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5854 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5855 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5856 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5857 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5858 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5859 module_param_named(per_host_store, sdebug_per_host_store, bool,
5860 		   S_IRUGO | S_IWUSR);
5861 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5862 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5863 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5864 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5865 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5866 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5867 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5868 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5869 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5870 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5871 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5872 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5873 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5874 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5875 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5876 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5877 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5878 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5879 		   S_IRUGO | S_IWUSR);
5880 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5881 module_param_named(write_same_length, sdebug_write_same_length, int,
5882 		   S_IRUGO | S_IWUSR);
5883 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5884 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
5885 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5886 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5887 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5888 
5889 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5890 MODULE_DESCRIPTION("SCSI debug adapter driver");
5891 MODULE_LICENSE("GPL");
5892 MODULE_VERSION(SDEBUG_VERSION);
5893 
5894 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5895 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5896 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5897 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5898 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5899 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5900 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5901 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5902 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5903 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5904 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5905 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5906 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5907 MODULE_PARM_DESC(host_max_queue,
5908 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5909 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5910 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5911 		 SDEBUG_VERSION "\")");
5912 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5913 MODULE_PARM_DESC(lbprz,
5914 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5915 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5916 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5917 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5918 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5919 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5920 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5921 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5922 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5923 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5924 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5925 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5926 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5927 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
5928 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5929 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5930 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5931 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5932 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5933 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5934 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5935 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
5936 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5937 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5938 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5939 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5940 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5941 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5942 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5943 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5944 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5945 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5946 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5947 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5948 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5949 MODULE_PARM_DESC(uuid_ctl,
5950 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5951 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5952 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5953 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5954 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5955 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5956 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
5957 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5958 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5959 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5960 
5961 #define SDEBUG_INFO_LEN 256
5962 static char sdebug_info[SDEBUG_INFO_LEN];
5963 
scsi_debug_info(struct Scsi_Host * shp)5964 static const char *scsi_debug_info(struct Scsi_Host *shp)
5965 {
5966 	int k;
5967 
5968 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5969 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5970 	if (k >= (SDEBUG_INFO_LEN - 1))
5971 		return sdebug_info;
5972 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5973 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5974 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5975 		  "statistics", (int)sdebug_statistics);
5976 	return sdebug_info;
5977 }
5978 
5979 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
scsi_debug_write_info(struct Scsi_Host * host,char * buffer,int length)5980 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5981 				 int length)
5982 {
5983 	char arr[16];
5984 	int opts;
5985 	int minLen = length > 15 ? 15 : length;
5986 
5987 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5988 		return -EACCES;
5989 	memcpy(arr, buffer, minLen);
5990 	arr[minLen] = '\0';
5991 	if (1 != sscanf(arr, "%d", &opts))
5992 		return -EINVAL;
5993 	sdebug_opts = opts;
5994 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5995 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5996 	if (sdebug_every_nth != 0)
5997 		tweak_cmnd_count();
5998 	return length;
5999 }
6000 
6001 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
6002  * same for each scsi_debug host (if more than one). Some of the counters
6003  * output are not atomics so might be inaccurate in a busy system. */
scsi_debug_show_info(struct seq_file * m,struct Scsi_Host * host)6004 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
6005 {
6006 	int f, j, l;
6007 	struct sdebug_queue *sqp;
6008 	struct sdebug_host_info *sdhp;
6009 
6010 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
6011 		   SDEBUG_VERSION, sdebug_version_date);
6012 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
6013 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
6014 		   sdebug_opts, sdebug_every_nth);
6015 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
6016 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
6017 		   sdebug_sector_size, "bytes");
6018 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
6019 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
6020 		   num_aborts);
6021 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
6022 		   num_dev_resets, num_target_resets, num_bus_resets,
6023 		   num_host_resets);
6024 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
6025 		   dix_reads, dix_writes, dif_errors);
6026 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6027 		   sdebug_statistics);
6028 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6029 		   atomic_read(&sdebug_cmnd_count),
6030 		   atomic_read(&sdebug_completions),
6031 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
6032 		   atomic_read(&sdebug_a_tsf),
6033 		   atomic_read(&sdeb_mq_poll_count));
6034 
6035 	seq_printf(m, "submit_queues=%d\n", submit_queues);
6036 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
6037 		seq_printf(m, "  queue %d:\n", j);
6038 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
6039 		if (f != sdebug_max_queue) {
6040 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
6041 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
6042 				   "first,last bits", f, l);
6043 		}
6044 	}
6045 
6046 	seq_printf(m, "this host_no=%d\n", host->host_no);
6047 	if (!xa_empty(per_store_ap)) {
6048 		bool niu;
6049 		int idx;
6050 		unsigned long l_idx;
6051 		struct sdeb_store_info *sip;
6052 
6053 		seq_puts(m, "\nhost list:\n");
6054 		j = 0;
6055 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6056 			idx = sdhp->si_idx;
6057 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
6058 				   sdhp->shost->host_no, idx);
6059 			++j;
6060 		}
6061 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6062 			   sdeb_most_recent_idx);
6063 		j = 0;
6064 		xa_for_each(per_store_ap, l_idx, sip) {
6065 			niu = xa_get_mark(per_store_ap, l_idx,
6066 					  SDEB_XA_NOT_IN_USE);
6067 			idx = (int)l_idx;
6068 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
6069 				   (niu ? "  not_in_use" : ""));
6070 			++j;
6071 		}
6072 	}
6073 	return 0;
6074 }
6075 
delay_show(struct device_driver * ddp,char * buf)6076 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6077 {
6078 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6079 }
6080 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6081  * of delay is jiffies.
6082  */
delay_store(struct device_driver * ddp,const char * buf,size_t count)6083 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6084 			   size_t count)
6085 {
6086 	int jdelay, res;
6087 
6088 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6089 		res = count;
6090 		if (sdebug_jdelay != jdelay) {
6091 			int j, k;
6092 			struct sdebug_queue *sqp;
6093 
6094 			block_unblock_all_queues(true);
6095 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6096 			     ++j, ++sqp) {
6097 				k = find_first_bit(sqp->in_use_bm,
6098 						   sdebug_max_queue);
6099 				if (k != sdebug_max_queue) {
6100 					res = -EBUSY;   /* queued commands */
6101 					break;
6102 				}
6103 			}
6104 			if (res > 0) {
6105 				sdebug_jdelay = jdelay;
6106 				sdebug_ndelay = 0;
6107 			}
6108 			block_unblock_all_queues(false);
6109 		}
6110 		return res;
6111 	}
6112 	return -EINVAL;
6113 }
6114 static DRIVER_ATTR_RW(delay);
6115 
ndelay_show(struct device_driver * ddp,char * buf)6116 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6117 {
6118 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6119 }
6120 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6121 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
ndelay_store(struct device_driver * ddp,const char * buf,size_t count)6122 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6123 			    size_t count)
6124 {
6125 	int ndelay, res;
6126 
6127 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6128 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6129 		res = count;
6130 		if (sdebug_ndelay != ndelay) {
6131 			int j, k;
6132 			struct sdebug_queue *sqp;
6133 
6134 			block_unblock_all_queues(true);
6135 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6136 			     ++j, ++sqp) {
6137 				k = find_first_bit(sqp->in_use_bm,
6138 						   sdebug_max_queue);
6139 				if (k != sdebug_max_queue) {
6140 					res = -EBUSY;   /* queued commands */
6141 					break;
6142 				}
6143 			}
6144 			if (res > 0) {
6145 				sdebug_ndelay = ndelay;
6146 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
6147 							: DEF_JDELAY;
6148 			}
6149 			block_unblock_all_queues(false);
6150 		}
6151 		return res;
6152 	}
6153 	return -EINVAL;
6154 }
6155 static DRIVER_ATTR_RW(ndelay);
6156 
opts_show(struct device_driver * ddp,char * buf)6157 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6158 {
6159 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6160 }
6161 
opts_store(struct device_driver * ddp,const char * buf,size_t count)6162 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6163 			  size_t count)
6164 {
6165 	int opts;
6166 	char work[20];
6167 
6168 	if (sscanf(buf, "%10s", work) == 1) {
6169 		if (strncasecmp(work, "0x", 2) == 0) {
6170 			if (kstrtoint(work + 2, 16, &opts) == 0)
6171 				goto opts_done;
6172 		} else {
6173 			if (kstrtoint(work, 10, &opts) == 0)
6174 				goto opts_done;
6175 		}
6176 	}
6177 	return -EINVAL;
6178 opts_done:
6179 	sdebug_opts = opts;
6180 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6181 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6182 	tweak_cmnd_count();
6183 	return count;
6184 }
6185 static DRIVER_ATTR_RW(opts);
6186 
ptype_show(struct device_driver * ddp,char * buf)6187 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6188 {
6189 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6190 }
ptype_store(struct device_driver * ddp,const char * buf,size_t count)6191 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6192 			   size_t count)
6193 {
6194 	int n;
6195 
6196 	/* Cannot change from or to TYPE_ZBC with sysfs */
6197 	if (sdebug_ptype == TYPE_ZBC)
6198 		return -EINVAL;
6199 
6200 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6201 		if (n == TYPE_ZBC)
6202 			return -EINVAL;
6203 		sdebug_ptype = n;
6204 		return count;
6205 	}
6206 	return -EINVAL;
6207 }
6208 static DRIVER_ATTR_RW(ptype);
6209 
dsense_show(struct device_driver * ddp,char * buf)6210 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6211 {
6212 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6213 }
dsense_store(struct device_driver * ddp,const char * buf,size_t count)6214 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6215 			    size_t count)
6216 {
6217 	int n;
6218 
6219 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6220 		sdebug_dsense = n;
6221 		return count;
6222 	}
6223 	return -EINVAL;
6224 }
6225 static DRIVER_ATTR_RW(dsense);
6226 
fake_rw_show(struct device_driver * ddp,char * buf)6227 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6228 {
6229 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6230 }
fake_rw_store(struct device_driver * ddp,const char * buf,size_t count)6231 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6232 			     size_t count)
6233 {
6234 	int n, idx;
6235 
6236 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6237 		bool want_store = (n == 0);
6238 		struct sdebug_host_info *sdhp;
6239 
6240 		n = (n > 0);
6241 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6242 		if (sdebug_fake_rw == n)
6243 			return count;	/* not transitioning so do nothing */
6244 
6245 		if (want_store) {	/* 1 --> 0 transition, set up store */
6246 			if (sdeb_first_idx < 0) {
6247 				idx = sdebug_add_store();
6248 				if (idx < 0)
6249 					return idx;
6250 			} else {
6251 				idx = sdeb_first_idx;
6252 				xa_clear_mark(per_store_ap, idx,
6253 					      SDEB_XA_NOT_IN_USE);
6254 			}
6255 			/* make all hosts use same store */
6256 			list_for_each_entry(sdhp, &sdebug_host_list,
6257 					    host_list) {
6258 				if (sdhp->si_idx != idx) {
6259 					xa_set_mark(per_store_ap, sdhp->si_idx,
6260 						    SDEB_XA_NOT_IN_USE);
6261 					sdhp->si_idx = idx;
6262 				}
6263 			}
6264 			sdeb_most_recent_idx = idx;
6265 		} else {	/* 0 --> 1 transition is trigger for shrink */
6266 			sdebug_erase_all_stores(true /* apart from first */);
6267 		}
6268 		sdebug_fake_rw = n;
6269 		return count;
6270 	}
6271 	return -EINVAL;
6272 }
6273 static DRIVER_ATTR_RW(fake_rw);
6274 
no_lun_0_show(struct device_driver * ddp,char * buf)6275 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6276 {
6277 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6278 }
no_lun_0_store(struct device_driver * ddp,const char * buf,size_t count)6279 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6280 			      size_t count)
6281 {
6282 	int n;
6283 
6284 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6285 		sdebug_no_lun_0 = n;
6286 		return count;
6287 	}
6288 	return -EINVAL;
6289 }
6290 static DRIVER_ATTR_RW(no_lun_0);
6291 
num_tgts_show(struct device_driver * ddp,char * buf)6292 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6293 {
6294 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6295 }
num_tgts_store(struct device_driver * ddp,const char * buf,size_t count)6296 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6297 			      size_t count)
6298 {
6299 	int n;
6300 
6301 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6302 		sdebug_num_tgts = n;
6303 		sdebug_max_tgts_luns();
6304 		return count;
6305 	}
6306 	return -EINVAL;
6307 }
6308 static DRIVER_ATTR_RW(num_tgts);
6309 
dev_size_mb_show(struct device_driver * ddp,char * buf)6310 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6311 {
6312 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6313 }
6314 static DRIVER_ATTR_RO(dev_size_mb);
6315 
per_host_store_show(struct device_driver * ddp,char * buf)6316 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6317 {
6318 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6319 }
6320 
per_host_store_store(struct device_driver * ddp,const char * buf,size_t count)6321 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6322 				    size_t count)
6323 {
6324 	bool v;
6325 
6326 	if (kstrtobool(buf, &v))
6327 		return -EINVAL;
6328 
6329 	sdebug_per_host_store = v;
6330 	return count;
6331 }
6332 static DRIVER_ATTR_RW(per_host_store);
6333 
num_parts_show(struct device_driver * ddp,char * buf)6334 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6335 {
6336 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6337 }
6338 static DRIVER_ATTR_RO(num_parts);
6339 
every_nth_show(struct device_driver * ddp,char * buf)6340 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6341 {
6342 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6343 }
every_nth_store(struct device_driver * ddp,const char * buf,size_t count)6344 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6345 			       size_t count)
6346 {
6347 	int nth;
6348 	char work[20];
6349 
6350 	if (sscanf(buf, "%10s", work) == 1) {
6351 		if (strncasecmp(work, "0x", 2) == 0) {
6352 			if (kstrtoint(work + 2, 16, &nth) == 0)
6353 				goto every_nth_done;
6354 		} else {
6355 			if (kstrtoint(work, 10, &nth) == 0)
6356 				goto every_nth_done;
6357 		}
6358 	}
6359 	return -EINVAL;
6360 
6361 every_nth_done:
6362 	sdebug_every_nth = nth;
6363 	if (nth && !sdebug_statistics) {
6364 		pr_info("every_nth needs statistics=1, set it\n");
6365 		sdebug_statistics = true;
6366 	}
6367 	tweak_cmnd_count();
6368 	return count;
6369 }
6370 static DRIVER_ATTR_RW(every_nth);
6371 
lun_format_show(struct device_driver * ddp,char * buf)6372 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6373 {
6374 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6375 }
lun_format_store(struct device_driver * ddp,const char * buf,size_t count)6376 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6377 				size_t count)
6378 {
6379 	int n;
6380 	bool changed;
6381 
6382 	if (kstrtoint(buf, 0, &n))
6383 		return -EINVAL;
6384 	if (n >= 0) {
6385 		if (n > (int)SAM_LUN_AM_FLAT) {
6386 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6387 			return -EINVAL;
6388 		}
6389 		changed = ((int)sdebug_lun_am != n);
6390 		sdebug_lun_am = n;
6391 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6392 			struct sdebug_host_info *sdhp;
6393 			struct sdebug_dev_info *dp;
6394 
6395 			spin_lock(&sdebug_host_list_lock);
6396 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6397 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6398 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6399 				}
6400 			}
6401 			spin_unlock(&sdebug_host_list_lock);
6402 		}
6403 		return count;
6404 	}
6405 	return -EINVAL;
6406 }
6407 static DRIVER_ATTR_RW(lun_format);
6408 
max_luns_show(struct device_driver * ddp,char * buf)6409 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6410 {
6411 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6412 }
max_luns_store(struct device_driver * ddp,const char * buf,size_t count)6413 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6414 			      size_t count)
6415 {
6416 	int n;
6417 	bool changed;
6418 
6419 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6420 		if (n > 256) {
6421 			pr_warn("max_luns can be no more than 256\n");
6422 			return -EINVAL;
6423 		}
6424 		changed = (sdebug_max_luns != n);
6425 		sdebug_max_luns = n;
6426 		sdebug_max_tgts_luns();
6427 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6428 			struct sdebug_host_info *sdhp;
6429 			struct sdebug_dev_info *dp;
6430 
6431 			spin_lock(&sdebug_host_list_lock);
6432 			list_for_each_entry(sdhp, &sdebug_host_list,
6433 					    host_list) {
6434 				list_for_each_entry(dp, &sdhp->dev_info_list,
6435 						    dev_list) {
6436 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6437 						dp->uas_bm);
6438 				}
6439 			}
6440 			spin_unlock(&sdebug_host_list_lock);
6441 		}
6442 		return count;
6443 	}
6444 	return -EINVAL;
6445 }
6446 static DRIVER_ATTR_RW(max_luns);
6447 
max_queue_show(struct device_driver * ddp,char * buf)6448 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6449 {
6450 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6451 }
6452 /* N.B. max_queue can be changed while there are queued commands. In flight
6453  * commands beyond the new max_queue will be completed. */
max_queue_store(struct device_driver * ddp,const char * buf,size_t count)6454 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6455 			       size_t count)
6456 {
6457 	int j, n, k, a;
6458 	struct sdebug_queue *sqp;
6459 
6460 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6461 	    (n <= SDEBUG_CANQUEUE) &&
6462 	    (sdebug_host_max_queue == 0)) {
6463 		block_unblock_all_queues(true);
6464 		k = 0;
6465 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6466 		     ++j, ++sqp) {
6467 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6468 			if (a > k)
6469 				k = a;
6470 		}
6471 		sdebug_max_queue = n;
6472 		if (k == SDEBUG_CANQUEUE)
6473 			atomic_set(&retired_max_queue, 0);
6474 		else if (k >= n)
6475 			atomic_set(&retired_max_queue, k + 1);
6476 		else
6477 			atomic_set(&retired_max_queue, 0);
6478 		block_unblock_all_queues(false);
6479 		return count;
6480 	}
6481 	return -EINVAL;
6482 }
6483 static DRIVER_ATTR_RW(max_queue);
6484 
host_max_queue_show(struct device_driver * ddp,char * buf)6485 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6486 {
6487 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6488 }
6489 
no_rwlock_show(struct device_driver * ddp,char * buf)6490 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6491 {
6492 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6493 }
6494 
no_rwlock_store(struct device_driver * ddp,const char * buf,size_t count)6495 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6496 {
6497 	bool v;
6498 
6499 	if (kstrtobool(buf, &v))
6500 		return -EINVAL;
6501 
6502 	sdebug_no_rwlock = v;
6503 	return count;
6504 }
6505 static DRIVER_ATTR_RW(no_rwlock);
6506 
6507 /*
6508  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6509  * in range [0, sdebug_host_max_queue), we can't change it.
6510  */
6511 static DRIVER_ATTR_RO(host_max_queue);
6512 
no_uld_show(struct device_driver * ddp,char * buf)6513 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6514 {
6515 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6516 }
6517 static DRIVER_ATTR_RO(no_uld);
6518 
scsi_level_show(struct device_driver * ddp,char * buf)6519 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6520 {
6521 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6522 }
6523 static DRIVER_ATTR_RO(scsi_level);
6524 
virtual_gb_show(struct device_driver * ddp,char * buf)6525 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6526 {
6527 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6528 }
virtual_gb_store(struct device_driver * ddp,const char * buf,size_t count)6529 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6530 				size_t count)
6531 {
6532 	int n;
6533 	bool changed;
6534 
6535 	/* Ignore capacity change for ZBC drives for now */
6536 	if (sdeb_zbc_in_use)
6537 		return -ENOTSUPP;
6538 
6539 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6540 		changed = (sdebug_virtual_gb != n);
6541 		sdebug_virtual_gb = n;
6542 		sdebug_capacity = get_sdebug_capacity();
6543 		if (changed) {
6544 			struct sdebug_host_info *sdhp;
6545 			struct sdebug_dev_info *dp;
6546 
6547 			spin_lock(&sdebug_host_list_lock);
6548 			list_for_each_entry(sdhp, &sdebug_host_list,
6549 					    host_list) {
6550 				list_for_each_entry(dp, &sdhp->dev_info_list,
6551 						    dev_list) {
6552 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6553 						dp->uas_bm);
6554 				}
6555 			}
6556 			spin_unlock(&sdebug_host_list_lock);
6557 		}
6558 		return count;
6559 	}
6560 	return -EINVAL;
6561 }
6562 static DRIVER_ATTR_RW(virtual_gb);
6563 
add_host_show(struct device_driver * ddp,char * buf)6564 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6565 {
6566 	/* absolute number of hosts currently active is what is shown */
6567 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6568 }
6569 
add_host_store(struct device_driver * ddp,const char * buf,size_t count)6570 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6571 			      size_t count)
6572 {
6573 	bool found;
6574 	unsigned long idx;
6575 	struct sdeb_store_info *sip;
6576 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6577 	int delta_hosts;
6578 
6579 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6580 		return -EINVAL;
6581 	if (delta_hosts > 0) {
6582 		do {
6583 			found = false;
6584 			if (want_phs) {
6585 				xa_for_each_marked(per_store_ap, idx, sip,
6586 						   SDEB_XA_NOT_IN_USE) {
6587 					sdeb_most_recent_idx = (int)idx;
6588 					found = true;
6589 					break;
6590 				}
6591 				if (found)	/* re-use case */
6592 					sdebug_add_host_helper((int)idx);
6593 				else
6594 					sdebug_do_add_host(true);
6595 			} else {
6596 				sdebug_do_add_host(false);
6597 			}
6598 		} while (--delta_hosts);
6599 	} else if (delta_hosts < 0) {
6600 		do {
6601 			sdebug_do_remove_host(false);
6602 		} while (++delta_hosts);
6603 	}
6604 	return count;
6605 }
6606 static DRIVER_ATTR_RW(add_host);
6607 
vpd_use_hostno_show(struct device_driver * ddp,char * buf)6608 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6609 {
6610 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6611 }
vpd_use_hostno_store(struct device_driver * ddp,const char * buf,size_t count)6612 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6613 				    size_t count)
6614 {
6615 	int n;
6616 
6617 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6618 		sdebug_vpd_use_hostno = n;
6619 		return count;
6620 	}
6621 	return -EINVAL;
6622 }
6623 static DRIVER_ATTR_RW(vpd_use_hostno);
6624 
statistics_show(struct device_driver * ddp,char * buf)6625 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6626 {
6627 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6628 }
statistics_store(struct device_driver * ddp,const char * buf,size_t count)6629 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6630 				size_t count)
6631 {
6632 	int n;
6633 
6634 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6635 		if (n > 0)
6636 			sdebug_statistics = true;
6637 		else {
6638 			clear_queue_stats();
6639 			sdebug_statistics = false;
6640 		}
6641 		return count;
6642 	}
6643 	return -EINVAL;
6644 }
6645 static DRIVER_ATTR_RW(statistics);
6646 
sector_size_show(struct device_driver * ddp,char * buf)6647 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6648 {
6649 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6650 }
6651 static DRIVER_ATTR_RO(sector_size);
6652 
submit_queues_show(struct device_driver * ddp,char * buf)6653 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6654 {
6655 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6656 }
6657 static DRIVER_ATTR_RO(submit_queues);
6658 
dix_show(struct device_driver * ddp,char * buf)6659 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6660 {
6661 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6662 }
6663 static DRIVER_ATTR_RO(dix);
6664 
dif_show(struct device_driver * ddp,char * buf)6665 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6666 {
6667 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6668 }
6669 static DRIVER_ATTR_RO(dif);
6670 
guard_show(struct device_driver * ddp,char * buf)6671 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6672 {
6673 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6674 }
6675 static DRIVER_ATTR_RO(guard);
6676 
ato_show(struct device_driver * ddp,char * buf)6677 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6678 {
6679 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6680 }
6681 static DRIVER_ATTR_RO(ato);
6682 
map_show(struct device_driver * ddp,char * buf)6683 static ssize_t map_show(struct device_driver *ddp, char *buf)
6684 {
6685 	ssize_t count = 0;
6686 
6687 	if (!scsi_debug_lbp())
6688 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6689 				 sdebug_store_sectors);
6690 
6691 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6692 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6693 
6694 		if (sip)
6695 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6696 					  (int)map_size, sip->map_storep);
6697 	}
6698 	buf[count++] = '\n';
6699 	buf[count] = '\0';
6700 
6701 	return count;
6702 }
6703 static DRIVER_ATTR_RO(map);
6704 
random_show(struct device_driver * ddp,char * buf)6705 static ssize_t random_show(struct device_driver *ddp, char *buf)
6706 {
6707 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6708 }
6709 
random_store(struct device_driver * ddp,const char * buf,size_t count)6710 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6711 			    size_t count)
6712 {
6713 	bool v;
6714 
6715 	if (kstrtobool(buf, &v))
6716 		return -EINVAL;
6717 
6718 	sdebug_random = v;
6719 	return count;
6720 }
6721 static DRIVER_ATTR_RW(random);
6722 
removable_show(struct device_driver * ddp,char * buf)6723 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6724 {
6725 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6726 }
removable_store(struct device_driver * ddp,const char * buf,size_t count)6727 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6728 			       size_t count)
6729 {
6730 	int n;
6731 
6732 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6733 		sdebug_removable = (n > 0);
6734 		return count;
6735 	}
6736 	return -EINVAL;
6737 }
6738 static DRIVER_ATTR_RW(removable);
6739 
host_lock_show(struct device_driver * ddp,char * buf)6740 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6741 {
6742 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6743 }
6744 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
host_lock_store(struct device_driver * ddp,const char * buf,size_t count)6745 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6746 			       size_t count)
6747 {
6748 	int n;
6749 
6750 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6751 		sdebug_host_lock = (n > 0);
6752 		return count;
6753 	}
6754 	return -EINVAL;
6755 }
6756 static DRIVER_ATTR_RW(host_lock);
6757 
strict_show(struct device_driver * ddp,char * buf)6758 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6759 {
6760 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6761 }
strict_store(struct device_driver * ddp,const char * buf,size_t count)6762 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6763 			    size_t count)
6764 {
6765 	int n;
6766 
6767 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6768 		sdebug_strict = (n > 0);
6769 		return count;
6770 	}
6771 	return -EINVAL;
6772 }
6773 static DRIVER_ATTR_RW(strict);
6774 
uuid_ctl_show(struct device_driver * ddp,char * buf)6775 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6776 {
6777 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6778 }
6779 static DRIVER_ATTR_RO(uuid_ctl);
6780 
cdb_len_show(struct device_driver * ddp,char * buf)6781 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6782 {
6783 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6784 }
cdb_len_store(struct device_driver * ddp,const char * buf,size_t count)6785 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6786 			     size_t count)
6787 {
6788 	int ret, n;
6789 
6790 	ret = kstrtoint(buf, 0, &n);
6791 	if (ret)
6792 		return ret;
6793 	sdebug_cdb_len = n;
6794 	all_config_cdb_len();
6795 	return count;
6796 }
6797 static DRIVER_ATTR_RW(cdb_len);
6798 
6799 static const char * const zbc_model_strs_a[] = {
6800 	[BLK_ZONED_NONE] = "none",
6801 	[BLK_ZONED_HA]   = "host-aware",
6802 	[BLK_ZONED_HM]   = "host-managed",
6803 };
6804 
6805 static const char * const zbc_model_strs_b[] = {
6806 	[BLK_ZONED_NONE] = "no",
6807 	[BLK_ZONED_HA]   = "aware",
6808 	[BLK_ZONED_HM]   = "managed",
6809 };
6810 
6811 static const char * const zbc_model_strs_c[] = {
6812 	[BLK_ZONED_NONE] = "0",
6813 	[BLK_ZONED_HA]   = "1",
6814 	[BLK_ZONED_HM]   = "2",
6815 };
6816 
sdeb_zbc_model_str(const char * cp)6817 static int sdeb_zbc_model_str(const char *cp)
6818 {
6819 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6820 
6821 	if (res < 0) {
6822 		res = sysfs_match_string(zbc_model_strs_b, cp);
6823 		if (res < 0) {
6824 			res = sysfs_match_string(zbc_model_strs_c, cp);
6825 			if (res < 0)
6826 				return -EINVAL;
6827 		}
6828 	}
6829 	return res;
6830 }
6831 
zbc_show(struct device_driver * ddp,char * buf)6832 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6833 {
6834 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6835 			 zbc_model_strs_a[sdeb_zbc_model]);
6836 }
6837 static DRIVER_ATTR_RO(zbc);
6838 
tur_ms_to_ready_show(struct device_driver * ddp,char * buf)6839 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6840 {
6841 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6842 }
6843 static DRIVER_ATTR_RO(tur_ms_to_ready);
6844 
6845 /* Note: The following array creates attribute files in the
6846    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6847    files (over those found in the /sys/module/scsi_debug/parameters
6848    directory) is that auxiliary actions can be triggered when an attribute
6849    is changed. For example see: add_host_store() above.
6850  */
6851 
6852 static struct attribute *sdebug_drv_attrs[] = {
6853 	&driver_attr_delay.attr,
6854 	&driver_attr_opts.attr,
6855 	&driver_attr_ptype.attr,
6856 	&driver_attr_dsense.attr,
6857 	&driver_attr_fake_rw.attr,
6858 	&driver_attr_host_max_queue.attr,
6859 	&driver_attr_no_lun_0.attr,
6860 	&driver_attr_num_tgts.attr,
6861 	&driver_attr_dev_size_mb.attr,
6862 	&driver_attr_num_parts.attr,
6863 	&driver_attr_every_nth.attr,
6864 	&driver_attr_lun_format.attr,
6865 	&driver_attr_max_luns.attr,
6866 	&driver_attr_max_queue.attr,
6867 	&driver_attr_no_rwlock.attr,
6868 	&driver_attr_no_uld.attr,
6869 	&driver_attr_scsi_level.attr,
6870 	&driver_attr_virtual_gb.attr,
6871 	&driver_attr_add_host.attr,
6872 	&driver_attr_per_host_store.attr,
6873 	&driver_attr_vpd_use_hostno.attr,
6874 	&driver_attr_sector_size.attr,
6875 	&driver_attr_statistics.attr,
6876 	&driver_attr_submit_queues.attr,
6877 	&driver_attr_dix.attr,
6878 	&driver_attr_dif.attr,
6879 	&driver_attr_guard.attr,
6880 	&driver_attr_ato.attr,
6881 	&driver_attr_map.attr,
6882 	&driver_attr_random.attr,
6883 	&driver_attr_removable.attr,
6884 	&driver_attr_host_lock.attr,
6885 	&driver_attr_ndelay.attr,
6886 	&driver_attr_strict.attr,
6887 	&driver_attr_uuid_ctl.attr,
6888 	&driver_attr_cdb_len.attr,
6889 	&driver_attr_tur_ms_to_ready.attr,
6890 	&driver_attr_zbc.attr,
6891 	NULL,
6892 };
6893 ATTRIBUTE_GROUPS(sdebug_drv);
6894 
6895 static struct device *pseudo_primary;
6896 
scsi_debug_init(void)6897 static int __init scsi_debug_init(void)
6898 {
6899 	bool want_store = (sdebug_fake_rw == 0);
6900 	unsigned long sz;
6901 	int k, ret, hosts_to_add;
6902 	int idx = -1;
6903 
6904 	ramdisk_lck_a[0] = &atomic_rw;
6905 	ramdisk_lck_a[1] = &atomic_rw2;
6906 	atomic_set(&retired_max_queue, 0);
6907 
6908 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6909 		pr_warn("ndelay must be less than 1 second, ignored\n");
6910 		sdebug_ndelay = 0;
6911 	} else if (sdebug_ndelay > 0)
6912 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6913 
6914 	switch (sdebug_sector_size) {
6915 	case  512:
6916 	case 1024:
6917 	case 2048:
6918 	case 4096:
6919 		break;
6920 	default:
6921 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6922 		return -EINVAL;
6923 	}
6924 
6925 	switch (sdebug_dif) {
6926 	case T10_PI_TYPE0_PROTECTION:
6927 		break;
6928 	case T10_PI_TYPE1_PROTECTION:
6929 	case T10_PI_TYPE2_PROTECTION:
6930 	case T10_PI_TYPE3_PROTECTION:
6931 		have_dif_prot = true;
6932 		break;
6933 
6934 	default:
6935 		pr_err("dif must be 0, 1, 2 or 3\n");
6936 		return -EINVAL;
6937 	}
6938 
6939 	if (sdebug_num_tgts < 0) {
6940 		pr_err("num_tgts must be >= 0\n");
6941 		return -EINVAL;
6942 	}
6943 
6944 	if (sdebug_guard > 1) {
6945 		pr_err("guard must be 0 or 1\n");
6946 		return -EINVAL;
6947 	}
6948 
6949 	if (sdebug_ato > 1) {
6950 		pr_err("ato must be 0 or 1\n");
6951 		return -EINVAL;
6952 	}
6953 
6954 	if (sdebug_physblk_exp > 15) {
6955 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6956 		return -EINVAL;
6957 	}
6958 
6959 	sdebug_lun_am = sdebug_lun_am_i;
6960 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6961 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6962 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6963 	}
6964 
6965 	if (sdebug_max_luns > 256) {
6966 		if (sdebug_max_luns > 16384) {
6967 			pr_warn("max_luns can be no more than 16384, use default\n");
6968 			sdebug_max_luns = DEF_MAX_LUNS;
6969 		}
6970 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6971 	}
6972 
6973 	if (sdebug_lowest_aligned > 0x3fff) {
6974 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6975 		return -EINVAL;
6976 	}
6977 
6978 	if (submit_queues < 1) {
6979 		pr_err("submit_queues must be 1 or more\n");
6980 		return -EINVAL;
6981 	}
6982 
6983 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6984 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6985 		return -EINVAL;
6986 	}
6987 
6988 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6989 	    (sdebug_host_max_queue < 0)) {
6990 		pr_err("host_max_queue must be in range [0 %d]\n",
6991 		       SDEBUG_CANQUEUE);
6992 		return -EINVAL;
6993 	}
6994 
6995 	if (sdebug_host_max_queue &&
6996 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6997 		sdebug_max_queue = sdebug_host_max_queue;
6998 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6999 			sdebug_max_queue);
7000 	}
7001 
7002 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
7003 			       GFP_KERNEL);
7004 	if (sdebug_q_arr == NULL)
7005 		return -ENOMEM;
7006 	for (k = 0; k < submit_queues; ++k)
7007 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
7008 
7009 	/*
7010 	 * check for host managed zoned block device specified with
7011 	 * ptype=0x14 or zbc=XXX.
7012 	 */
7013 	if (sdebug_ptype == TYPE_ZBC) {
7014 		sdeb_zbc_model = BLK_ZONED_HM;
7015 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
7016 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7017 		if (k < 0) {
7018 			ret = k;
7019 			goto free_q_arr;
7020 		}
7021 		sdeb_zbc_model = k;
7022 		switch (sdeb_zbc_model) {
7023 		case BLK_ZONED_NONE:
7024 		case BLK_ZONED_HA:
7025 			sdebug_ptype = TYPE_DISK;
7026 			break;
7027 		case BLK_ZONED_HM:
7028 			sdebug_ptype = TYPE_ZBC;
7029 			break;
7030 		default:
7031 			pr_err("Invalid ZBC model\n");
7032 			ret = -EINVAL;
7033 			goto free_q_arr;
7034 		}
7035 	}
7036 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
7037 		sdeb_zbc_in_use = true;
7038 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7039 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7040 	}
7041 
7042 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7043 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7044 	if (sdebug_dev_size_mb < 1)
7045 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
7046 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7047 	sdebug_store_sectors = sz / sdebug_sector_size;
7048 	sdebug_capacity = get_sdebug_capacity();
7049 
7050 	/* play around with geometry, don't waste too much on track 0 */
7051 	sdebug_heads = 8;
7052 	sdebug_sectors_per = 32;
7053 	if (sdebug_dev_size_mb >= 256)
7054 		sdebug_heads = 64;
7055 	else if (sdebug_dev_size_mb >= 16)
7056 		sdebug_heads = 32;
7057 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7058 			       (sdebug_sectors_per * sdebug_heads);
7059 	if (sdebug_cylinders_per >= 1024) {
7060 		/* other LLDs do this; implies >= 1GB ram disk ... */
7061 		sdebug_heads = 255;
7062 		sdebug_sectors_per = 63;
7063 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7064 			       (sdebug_sectors_per * sdebug_heads);
7065 	}
7066 	if (scsi_debug_lbp()) {
7067 		sdebug_unmap_max_blocks =
7068 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7069 
7070 		sdebug_unmap_max_desc =
7071 			clamp(sdebug_unmap_max_desc, 0U, 256U);
7072 
7073 		sdebug_unmap_granularity =
7074 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7075 
7076 		if (sdebug_unmap_alignment &&
7077 		    sdebug_unmap_granularity <=
7078 		    sdebug_unmap_alignment) {
7079 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7080 			ret = -EINVAL;
7081 			goto free_q_arr;
7082 		}
7083 	}
7084 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7085 	if (want_store) {
7086 		idx = sdebug_add_store();
7087 		if (idx < 0) {
7088 			ret = idx;
7089 			goto free_q_arr;
7090 		}
7091 	}
7092 
7093 	pseudo_primary = root_device_register("pseudo_0");
7094 	if (IS_ERR(pseudo_primary)) {
7095 		pr_warn("root_device_register() error\n");
7096 		ret = PTR_ERR(pseudo_primary);
7097 		goto free_vm;
7098 	}
7099 	ret = bus_register(&pseudo_lld_bus);
7100 	if (ret < 0) {
7101 		pr_warn("bus_register error: %d\n", ret);
7102 		goto dev_unreg;
7103 	}
7104 	ret = driver_register(&sdebug_driverfs_driver);
7105 	if (ret < 0) {
7106 		pr_warn("driver_register error: %d\n", ret);
7107 		goto bus_unreg;
7108 	}
7109 
7110 	hosts_to_add = sdebug_add_host;
7111 	sdebug_add_host = 0;
7112 
7113 	for (k = 0; k < hosts_to_add; k++) {
7114 		if (want_store && k == 0) {
7115 			ret = sdebug_add_host_helper(idx);
7116 			if (ret < 0) {
7117 				pr_err("add_host_helper k=%d, error=%d\n",
7118 				       k, -ret);
7119 				break;
7120 			}
7121 		} else {
7122 			ret = sdebug_do_add_host(want_store &&
7123 						 sdebug_per_host_store);
7124 			if (ret < 0) {
7125 				pr_err("add_host k=%d error=%d\n", k, -ret);
7126 				break;
7127 			}
7128 		}
7129 	}
7130 	if (sdebug_verbose)
7131 		pr_info("built %d host(s)\n", sdebug_num_hosts);
7132 
7133 	return 0;
7134 
7135 bus_unreg:
7136 	bus_unregister(&pseudo_lld_bus);
7137 dev_unreg:
7138 	root_device_unregister(pseudo_primary);
7139 free_vm:
7140 	sdebug_erase_store(idx, NULL);
7141 free_q_arr:
7142 	kfree(sdebug_q_arr);
7143 	return ret;
7144 }
7145 
scsi_debug_exit(void)7146 static void __exit scsi_debug_exit(void)
7147 {
7148 	int k = sdebug_num_hosts;
7149 
7150 	stop_all_queued();
7151 	for (; k; k--)
7152 		sdebug_do_remove_host(true);
7153 	free_all_queued();
7154 	driver_unregister(&sdebug_driverfs_driver);
7155 	bus_unregister(&pseudo_lld_bus);
7156 	root_device_unregister(pseudo_primary);
7157 
7158 	sdebug_erase_all_stores(false);
7159 	xa_destroy(per_store_ap);
7160 	kfree(sdebug_q_arr);
7161 }
7162 
7163 device_initcall(scsi_debug_init);
7164 module_exit(scsi_debug_exit);
7165 
sdebug_release_adapter(struct device * dev)7166 static void sdebug_release_adapter(struct device *dev)
7167 {
7168 	struct sdebug_host_info *sdbg_host;
7169 
7170 	sdbg_host = to_sdebug_host(dev);
7171 	kfree(sdbg_host);
7172 }
7173 
7174 /* idx must be valid, if sip is NULL then it will be obtained using idx */
sdebug_erase_store(int idx,struct sdeb_store_info * sip)7175 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7176 {
7177 	if (idx < 0)
7178 		return;
7179 	if (!sip) {
7180 		if (xa_empty(per_store_ap))
7181 			return;
7182 		sip = xa_load(per_store_ap, idx);
7183 		if (!sip)
7184 			return;
7185 	}
7186 	vfree(sip->map_storep);
7187 	vfree(sip->dif_storep);
7188 	vfree(sip->storep);
7189 	xa_erase(per_store_ap, idx);
7190 	kfree(sip);
7191 }
7192 
7193 /* Assume apart_from_first==false only in shutdown case. */
sdebug_erase_all_stores(bool apart_from_first)7194 static void sdebug_erase_all_stores(bool apart_from_first)
7195 {
7196 	unsigned long idx;
7197 	struct sdeb_store_info *sip = NULL;
7198 
7199 	xa_for_each(per_store_ap, idx, sip) {
7200 		if (apart_from_first)
7201 			apart_from_first = false;
7202 		else
7203 			sdebug_erase_store(idx, sip);
7204 	}
7205 	if (apart_from_first)
7206 		sdeb_most_recent_idx = sdeb_first_idx;
7207 }
7208 
7209 /*
7210  * Returns store xarray new element index (idx) if >=0 else negated errno.
7211  * Limit the number of stores to 65536.
7212  */
sdebug_add_store(void)7213 static int sdebug_add_store(void)
7214 {
7215 	int res;
7216 	u32 n_idx;
7217 	unsigned long iflags;
7218 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7219 	struct sdeb_store_info *sip = NULL;
7220 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7221 
7222 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7223 	if (!sip)
7224 		return -ENOMEM;
7225 
7226 	xa_lock_irqsave(per_store_ap, iflags);
7227 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7228 	if (unlikely(res < 0)) {
7229 		xa_unlock_irqrestore(per_store_ap, iflags);
7230 		kfree(sip);
7231 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7232 		return res;
7233 	}
7234 	sdeb_most_recent_idx = n_idx;
7235 	if (sdeb_first_idx < 0)
7236 		sdeb_first_idx = n_idx;
7237 	xa_unlock_irqrestore(per_store_ap, iflags);
7238 
7239 	res = -ENOMEM;
7240 	sip->storep = vzalloc(sz);
7241 	if (!sip->storep) {
7242 		pr_err("user data oom\n");
7243 		goto err;
7244 	}
7245 	if (sdebug_num_parts > 0)
7246 		sdebug_build_parts(sip->storep, sz);
7247 
7248 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7249 	if (sdebug_dix) {
7250 		int dif_size;
7251 
7252 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7253 		sip->dif_storep = vmalloc(dif_size);
7254 
7255 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7256 			sip->dif_storep);
7257 
7258 		if (!sip->dif_storep) {
7259 			pr_err("DIX oom\n");
7260 			goto err;
7261 		}
7262 		memset(sip->dif_storep, 0xff, dif_size);
7263 	}
7264 	/* Logical Block Provisioning */
7265 	if (scsi_debug_lbp()) {
7266 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7267 		sip->map_storep = vmalloc(array_size(sizeof(long),
7268 						     BITS_TO_LONGS(map_size)));
7269 
7270 		pr_info("%lu provisioning blocks\n", map_size);
7271 
7272 		if (!sip->map_storep) {
7273 			pr_err("LBP map oom\n");
7274 			goto err;
7275 		}
7276 
7277 		bitmap_zero(sip->map_storep, map_size);
7278 
7279 		/* Map first 1KB for partition table */
7280 		if (sdebug_num_parts)
7281 			map_region(sip, 0, 2);
7282 	}
7283 
7284 	rwlock_init(&sip->macc_lck);
7285 	return (int)n_idx;
7286 err:
7287 	sdebug_erase_store((int)n_idx, sip);
7288 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7289 	return res;
7290 }
7291 
sdebug_add_host_helper(int per_host_idx)7292 static int sdebug_add_host_helper(int per_host_idx)
7293 {
7294 	int k, devs_per_host, idx;
7295 	int error = -ENOMEM;
7296 	struct sdebug_host_info *sdbg_host;
7297 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7298 
7299 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7300 	if (!sdbg_host)
7301 		return -ENOMEM;
7302 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7303 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7304 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7305 	sdbg_host->si_idx = idx;
7306 
7307 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7308 
7309 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7310 	for (k = 0; k < devs_per_host; k++) {
7311 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7312 		if (!sdbg_devinfo)
7313 			goto clean;
7314 	}
7315 
7316 	spin_lock(&sdebug_host_list_lock);
7317 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7318 	spin_unlock(&sdebug_host_list_lock);
7319 
7320 	sdbg_host->dev.bus = &pseudo_lld_bus;
7321 	sdbg_host->dev.parent = pseudo_primary;
7322 	sdbg_host->dev.release = &sdebug_release_adapter;
7323 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7324 
7325 	error = device_register(&sdbg_host->dev);
7326 	if (error) {
7327 		spin_lock(&sdebug_host_list_lock);
7328 		list_del(&sdbg_host->host_list);
7329 		spin_unlock(&sdebug_host_list_lock);
7330 		goto clean;
7331 	}
7332 
7333 	++sdebug_num_hosts;
7334 	return 0;
7335 
7336 clean:
7337 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7338 				 dev_list) {
7339 		list_del(&sdbg_devinfo->dev_list);
7340 		kfree(sdbg_devinfo->zstate);
7341 		kfree(sdbg_devinfo);
7342 	}
7343 	if (sdbg_host->dev.release)
7344 		put_device(&sdbg_host->dev);
7345 	else
7346 		kfree(sdbg_host);
7347 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7348 	return error;
7349 }
7350 
sdebug_do_add_host(bool mk_new_store)7351 static int sdebug_do_add_host(bool mk_new_store)
7352 {
7353 	int ph_idx = sdeb_most_recent_idx;
7354 
7355 	if (mk_new_store) {
7356 		ph_idx = sdebug_add_store();
7357 		if (ph_idx < 0)
7358 			return ph_idx;
7359 	}
7360 	return sdebug_add_host_helper(ph_idx);
7361 }
7362 
sdebug_do_remove_host(bool the_end)7363 static void sdebug_do_remove_host(bool the_end)
7364 {
7365 	int idx = -1;
7366 	struct sdebug_host_info *sdbg_host = NULL;
7367 	struct sdebug_host_info *sdbg_host2;
7368 
7369 	spin_lock(&sdebug_host_list_lock);
7370 	if (!list_empty(&sdebug_host_list)) {
7371 		sdbg_host = list_entry(sdebug_host_list.prev,
7372 				       struct sdebug_host_info, host_list);
7373 		idx = sdbg_host->si_idx;
7374 	}
7375 	if (!the_end && idx >= 0) {
7376 		bool unique = true;
7377 
7378 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7379 			if (sdbg_host2 == sdbg_host)
7380 				continue;
7381 			if (idx == sdbg_host2->si_idx) {
7382 				unique = false;
7383 				break;
7384 			}
7385 		}
7386 		if (unique) {
7387 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7388 			if (idx == sdeb_most_recent_idx)
7389 				--sdeb_most_recent_idx;
7390 		}
7391 	}
7392 	if (sdbg_host)
7393 		list_del(&sdbg_host->host_list);
7394 	spin_unlock(&sdebug_host_list_lock);
7395 
7396 	if (!sdbg_host)
7397 		return;
7398 
7399 	device_unregister(&sdbg_host->dev);
7400 	--sdebug_num_hosts;
7401 }
7402 
sdebug_change_qdepth(struct scsi_device * sdev,int qdepth)7403 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7404 {
7405 	int num_in_q = 0;
7406 	struct sdebug_dev_info *devip;
7407 
7408 	block_unblock_all_queues(true);
7409 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7410 	if (NULL == devip) {
7411 		block_unblock_all_queues(false);
7412 		return	-ENODEV;
7413 	}
7414 	num_in_q = atomic_read(&devip->num_in_q);
7415 
7416 	if (qdepth > SDEBUG_CANQUEUE) {
7417 		qdepth = SDEBUG_CANQUEUE;
7418 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7419 			qdepth, SDEBUG_CANQUEUE);
7420 	}
7421 	if (qdepth < 1)
7422 		qdepth = 1;
7423 	if (qdepth != sdev->queue_depth)
7424 		scsi_change_queue_depth(sdev, qdepth);
7425 
7426 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7427 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7428 			    __func__, qdepth, num_in_q);
7429 	}
7430 	block_unblock_all_queues(false);
7431 	return sdev->queue_depth;
7432 }
7433 
fake_timeout(struct scsi_cmnd * scp)7434 static bool fake_timeout(struct scsi_cmnd *scp)
7435 {
7436 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7437 		if (sdebug_every_nth < -1)
7438 			sdebug_every_nth = -1;
7439 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7440 			return true; /* ignore command causing timeout */
7441 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7442 			 scsi_medium_access_command(scp))
7443 			return true; /* time out reads and writes */
7444 	}
7445 	return false;
7446 }
7447 
7448 /* Response to TUR or media access command when device stopped */
resp_not_ready(struct scsi_cmnd * scp,struct sdebug_dev_info * devip)7449 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7450 {
7451 	int stopped_state;
7452 	u64 diff_ns = 0;
7453 	ktime_t now_ts = ktime_get_boottime();
7454 	struct scsi_device *sdp = scp->device;
7455 
7456 	stopped_state = atomic_read(&devip->stopped);
7457 	if (stopped_state == 2) {
7458 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7459 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7460 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7461 				/* tur_ms_to_ready timer extinguished */
7462 				atomic_set(&devip->stopped, 0);
7463 				return 0;
7464 			}
7465 		}
7466 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7467 		if (sdebug_verbose)
7468 			sdev_printk(KERN_INFO, sdp,
7469 				    "%s: Not ready: in process of becoming ready\n", my_name);
7470 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7471 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7472 
7473 			if (diff_ns <= tur_nanosecs_to_ready)
7474 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7475 			else
7476 				diff_ns = tur_nanosecs_to_ready;
7477 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7478 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7479 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7480 						   diff_ns);
7481 			return check_condition_result;
7482 		}
7483 	}
7484 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7485 	if (sdebug_verbose)
7486 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7487 			    my_name);
7488 	return check_condition_result;
7489 }
7490 
sdebug_map_queues(struct Scsi_Host * shost)7491 static void sdebug_map_queues(struct Scsi_Host *shost)
7492 {
7493 	int i, qoff;
7494 
7495 	if (shost->nr_hw_queues == 1)
7496 		return;
7497 
7498 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7499 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7500 
7501 		map->nr_queues  = 0;
7502 
7503 		if (i == HCTX_TYPE_DEFAULT)
7504 			map->nr_queues = submit_queues - poll_queues;
7505 		else if (i == HCTX_TYPE_POLL)
7506 			map->nr_queues = poll_queues;
7507 
7508 		if (!map->nr_queues) {
7509 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7510 			continue;
7511 		}
7512 
7513 		map->queue_offset = qoff;
7514 		blk_mq_map_queues(map);
7515 
7516 		qoff += map->nr_queues;
7517 	}
7518 }
7519 
sdebug_blk_mq_poll(struct Scsi_Host * shost,unsigned int queue_num)7520 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7521 {
7522 	bool first;
7523 	bool retiring = false;
7524 	int num_entries = 0;
7525 	unsigned int qc_idx = 0;
7526 	unsigned long iflags;
7527 	ktime_t kt_from_boot = ktime_get_boottime();
7528 	struct sdebug_queue *sqp;
7529 	struct sdebug_queued_cmd *sqcp;
7530 	struct scsi_cmnd *scp;
7531 	struct sdebug_dev_info *devip;
7532 	struct sdebug_defer *sd_dp;
7533 
7534 	sqp = sdebug_q_arr + queue_num;
7535 
7536 	spin_lock_irqsave(&sqp->qc_lock, iflags);
7537 
7538 	qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7539 	if (qc_idx >= sdebug_max_queue)
7540 		goto unlock;
7541 
7542 	for (first = true; first || qc_idx + 1 < sdebug_max_queue; )   {
7543 		if (first) {
7544 			first = false;
7545 			if (!test_bit(qc_idx, sqp->in_use_bm))
7546 				continue;
7547 		} else {
7548 			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7549 		}
7550 		if (qc_idx >= sdebug_max_queue)
7551 			break;
7552 
7553 		sqcp = &sqp->qc_arr[qc_idx];
7554 		sd_dp = sqcp->sd_dp;
7555 		if (unlikely(!sd_dp))
7556 			continue;
7557 		scp = sqcp->a_cmnd;
7558 		if (unlikely(scp == NULL)) {
7559 			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7560 			       queue_num, qc_idx, __func__);
7561 			break;
7562 		}
7563 		if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
7564 			if (kt_from_boot < sd_dp->cmpl_ts)
7565 				continue;
7566 
7567 		} else		/* ignoring non REQ_POLLED requests */
7568 			continue;
7569 		devip = (struct sdebug_dev_info *)scp->device->hostdata;
7570 		if (likely(devip))
7571 			atomic_dec(&devip->num_in_q);
7572 		else
7573 			pr_err("devip=NULL from %s\n", __func__);
7574 		if (unlikely(atomic_read(&retired_max_queue) > 0))
7575 			retiring = true;
7576 
7577 		sqcp->a_cmnd = NULL;
7578 		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7579 			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7580 				sqp, queue_num, qc_idx, __func__);
7581 			break;
7582 		}
7583 		if (unlikely(retiring)) {	/* user has reduced max_queue */
7584 			int k, retval;
7585 
7586 			retval = atomic_read(&retired_max_queue);
7587 			if (qc_idx >= retval) {
7588 				pr_err("index %d too large\n", retval);
7589 				break;
7590 			}
7591 			k = find_last_bit(sqp->in_use_bm, retval);
7592 			if ((k < sdebug_max_queue) || (k == retval))
7593 				atomic_set(&retired_max_queue, 0);
7594 			else
7595 				atomic_set(&retired_max_queue, k + 1);
7596 		}
7597 		WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
7598 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7599 		scsi_done(scp); /* callback to mid level */
7600 		num_entries++;
7601 		spin_lock_irqsave(&sqp->qc_lock, iflags);
7602 		if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
7603 			break;
7604 	}
7605 
7606 unlock:
7607 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7608 
7609 	if (num_entries > 0)
7610 		atomic_add(num_entries, &sdeb_mq_poll_count);
7611 	return num_entries;
7612 }
7613 
scsi_debug_queuecommand(struct Scsi_Host * shost,struct scsi_cmnd * scp)7614 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7615 				   struct scsi_cmnd *scp)
7616 {
7617 	u8 sdeb_i;
7618 	struct scsi_device *sdp = scp->device;
7619 	const struct opcode_info_t *oip;
7620 	const struct opcode_info_t *r_oip;
7621 	struct sdebug_dev_info *devip;
7622 	u8 *cmd = scp->cmnd;
7623 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7624 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7625 	int k, na;
7626 	int errsts = 0;
7627 	u64 lun_index = sdp->lun & 0x3FFF;
7628 	u32 flags;
7629 	u16 sa;
7630 	u8 opcode = cmd[0];
7631 	bool has_wlun_rl;
7632 	bool inject_now;
7633 
7634 	scsi_set_resid(scp, 0);
7635 	if (sdebug_statistics) {
7636 		atomic_inc(&sdebug_cmnd_count);
7637 		inject_now = inject_on_this_cmd();
7638 	} else {
7639 		inject_now = false;
7640 	}
7641 	if (unlikely(sdebug_verbose &&
7642 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7643 		char b[120];
7644 		int n, len, sb;
7645 
7646 		len = scp->cmd_len;
7647 		sb = (int)sizeof(b);
7648 		if (len > 32)
7649 			strcpy(b, "too long, over 32 bytes");
7650 		else {
7651 			for (k = 0, n = 0; k < len && n < sb; ++k)
7652 				n += scnprintf(b + n, sb - n, "%02x ",
7653 					       (u32)cmd[k]);
7654 		}
7655 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7656 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7657 	}
7658 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7659 		return SCSI_MLQUEUE_HOST_BUSY;
7660 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7661 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7662 		goto err_out;
7663 
7664 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7665 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7666 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7667 	if (unlikely(!devip)) {
7668 		devip = find_build_dev_info(sdp);
7669 		if (NULL == devip)
7670 			goto err_out;
7671 	}
7672 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7673 		atomic_set(&sdeb_inject_pending, 1);
7674 
7675 	na = oip->num_attached;
7676 	r_pfp = oip->pfp;
7677 	if (na) {	/* multiple commands with this opcode */
7678 		r_oip = oip;
7679 		if (FF_SA & r_oip->flags) {
7680 			if (F_SA_LOW & oip->flags)
7681 				sa = 0x1f & cmd[1];
7682 			else
7683 				sa = get_unaligned_be16(cmd + 8);
7684 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7685 				if (opcode == oip->opcode && sa == oip->sa)
7686 					break;
7687 			}
7688 		} else {   /* since no service action only check opcode */
7689 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7690 				if (opcode == oip->opcode)
7691 					break;
7692 			}
7693 		}
7694 		if (k > na) {
7695 			if (F_SA_LOW & r_oip->flags)
7696 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7697 			else if (F_SA_HIGH & r_oip->flags)
7698 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7699 			else
7700 				mk_sense_invalid_opcode(scp);
7701 			goto check_cond;
7702 		}
7703 	}	/* else (when na==0) we assume the oip is a match */
7704 	flags = oip->flags;
7705 	if (unlikely(F_INV_OP & flags)) {
7706 		mk_sense_invalid_opcode(scp);
7707 		goto check_cond;
7708 	}
7709 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7710 		if (sdebug_verbose)
7711 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7712 				    my_name, opcode, " supported for wlun");
7713 		mk_sense_invalid_opcode(scp);
7714 		goto check_cond;
7715 	}
7716 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7717 		u8 rem;
7718 		int j;
7719 
7720 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7721 			rem = ~oip->len_mask[k] & cmd[k];
7722 			if (rem) {
7723 				for (j = 7; j >= 0; --j, rem <<= 1) {
7724 					if (0x80 & rem)
7725 						break;
7726 				}
7727 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7728 				goto check_cond;
7729 			}
7730 		}
7731 	}
7732 	if (unlikely(!(F_SKIP_UA & flags) &&
7733 		     find_first_bit(devip->uas_bm,
7734 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7735 		errsts = make_ua(scp, devip);
7736 		if (errsts)
7737 			goto check_cond;
7738 	}
7739 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7740 		     atomic_read(&devip->stopped))) {
7741 		errsts = resp_not_ready(scp, devip);
7742 		if (errsts)
7743 			goto fini;
7744 	}
7745 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7746 		goto fini;
7747 	if (unlikely(sdebug_every_nth)) {
7748 		if (fake_timeout(scp))
7749 			return 0;	/* ignore command: make trouble */
7750 	}
7751 	if (likely(oip->pfp))
7752 		pfp = oip->pfp;	/* calls a resp_* function */
7753 	else
7754 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7755 
7756 fini:
7757 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7758 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7759 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7760 					    sdebug_ndelay > 10000)) {
7761 		/*
7762 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7763 		 * for Start Stop Unit (SSU) want at least 1 second delay and
7764 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
7765 		 * For Synchronize Cache want 1/20 of SSU's delay.
7766 		 */
7767 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7768 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7769 
7770 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7771 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7772 	} else
7773 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7774 				     sdebug_ndelay);
7775 check_cond:
7776 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7777 err_out:
7778 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7779 }
7780 
7781 static struct scsi_host_template sdebug_driver_template = {
7782 	.show_info =		scsi_debug_show_info,
7783 	.write_info =		scsi_debug_write_info,
7784 	.proc_name =		sdebug_proc_name,
7785 	.name =			"SCSI DEBUG",
7786 	.info =			scsi_debug_info,
7787 	.slave_alloc =		scsi_debug_slave_alloc,
7788 	.slave_configure =	scsi_debug_slave_configure,
7789 	.slave_destroy =	scsi_debug_slave_destroy,
7790 	.ioctl =		scsi_debug_ioctl,
7791 	.queuecommand =		scsi_debug_queuecommand,
7792 	.change_queue_depth =	sdebug_change_qdepth,
7793 	.map_queues =		sdebug_map_queues,
7794 	.mq_poll =		sdebug_blk_mq_poll,
7795 	.eh_abort_handler =	scsi_debug_abort,
7796 	.eh_device_reset_handler = scsi_debug_device_reset,
7797 	.eh_target_reset_handler = scsi_debug_target_reset,
7798 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7799 	.eh_host_reset_handler = scsi_debug_host_reset,
7800 	.can_queue =		SDEBUG_CANQUEUE,
7801 	.this_id =		7,
7802 	.sg_tablesize =		SG_MAX_SEGMENTS,
7803 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7804 	.max_sectors =		-1U,
7805 	.max_segment_size =	-1U,
7806 	.module =		THIS_MODULE,
7807 	.track_queue_depth =	1,
7808 };
7809 
sdebug_driver_probe(struct device * dev)7810 static int sdebug_driver_probe(struct device *dev)
7811 {
7812 	int error = 0;
7813 	struct sdebug_host_info *sdbg_host;
7814 	struct Scsi_Host *hpnt;
7815 	int hprot;
7816 
7817 	sdbg_host = to_sdebug_host(dev);
7818 
7819 	sdebug_driver_template.can_queue = sdebug_max_queue;
7820 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7821 	if (!sdebug_clustering)
7822 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7823 
7824 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7825 	if (NULL == hpnt) {
7826 		pr_err("scsi_host_alloc failed\n");
7827 		error = -ENODEV;
7828 		return error;
7829 	}
7830 	if (submit_queues > nr_cpu_ids) {
7831 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7832 			my_name, submit_queues, nr_cpu_ids);
7833 		submit_queues = nr_cpu_ids;
7834 	}
7835 	/*
7836 	 * Decide whether to tell scsi subsystem that we want mq. The
7837 	 * following should give the same answer for each host.
7838 	 */
7839 	hpnt->nr_hw_queues = submit_queues;
7840 	if (sdebug_host_max_queue)
7841 		hpnt->host_tagset = 1;
7842 
7843 	/* poll queues are possible for nr_hw_queues > 1 */
7844 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7845 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7846 			 my_name, poll_queues, hpnt->nr_hw_queues);
7847 		poll_queues = 0;
7848 	}
7849 
7850 	/*
7851 	 * Poll queues don't need interrupts, but we need at least one I/O queue
7852 	 * left over for non-polled I/O.
7853 	 * If condition not met, trim poll_queues to 1 (just for simplicity).
7854 	 */
7855 	if (poll_queues >= submit_queues) {
7856 		if (submit_queues < 3)
7857 			pr_warn("%s: trim poll_queues to 1\n", my_name);
7858 		else
7859 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7860 				my_name, submit_queues - 1);
7861 		poll_queues = 1;
7862 	}
7863 	if (poll_queues)
7864 		hpnt->nr_maps = 3;
7865 
7866 	sdbg_host->shost = hpnt;
7867 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7868 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7869 		hpnt->max_id = sdebug_num_tgts + 1;
7870 	else
7871 		hpnt->max_id = sdebug_num_tgts;
7872 	/* = sdebug_max_luns; */
7873 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7874 
7875 	hprot = 0;
7876 
7877 	switch (sdebug_dif) {
7878 
7879 	case T10_PI_TYPE1_PROTECTION:
7880 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7881 		if (sdebug_dix)
7882 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7883 		break;
7884 
7885 	case T10_PI_TYPE2_PROTECTION:
7886 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7887 		if (sdebug_dix)
7888 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7889 		break;
7890 
7891 	case T10_PI_TYPE3_PROTECTION:
7892 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7893 		if (sdebug_dix)
7894 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7895 		break;
7896 
7897 	default:
7898 		if (sdebug_dix)
7899 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7900 		break;
7901 	}
7902 
7903 	scsi_host_set_prot(hpnt, hprot);
7904 
7905 	if (have_dif_prot || sdebug_dix)
7906 		pr_info("host protection%s%s%s%s%s%s%s\n",
7907 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7908 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7909 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7910 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7911 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7912 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7913 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7914 
7915 	if (sdebug_guard == 1)
7916 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7917 	else
7918 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7919 
7920 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7921 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7922 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7923 		sdebug_statistics = true;
7924 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7925 	if (error) {
7926 		pr_err("scsi_add_host failed\n");
7927 		error = -ENODEV;
7928 		scsi_host_put(hpnt);
7929 	} else {
7930 		scsi_scan_host(hpnt);
7931 	}
7932 
7933 	return error;
7934 }
7935 
sdebug_driver_remove(struct device * dev)7936 static void sdebug_driver_remove(struct device *dev)
7937 {
7938 	struct sdebug_host_info *sdbg_host;
7939 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7940 
7941 	sdbg_host = to_sdebug_host(dev);
7942 
7943 	scsi_remove_host(sdbg_host->shost);
7944 
7945 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7946 				 dev_list) {
7947 		list_del(&sdbg_devinfo->dev_list);
7948 		kfree(sdbg_devinfo->zstate);
7949 		kfree(sdbg_devinfo);
7950 	}
7951 
7952 	scsi_host_put(sdbg_host->shost);
7953 }
7954 
pseudo_lld_bus_match(struct device * dev,struct device_driver * dev_driver)7955 static int pseudo_lld_bus_match(struct device *dev,
7956 				struct device_driver *dev_driver)
7957 {
7958 	return 1;
7959 }
7960 
7961 static struct bus_type pseudo_lld_bus = {
7962 	.name = "pseudo",
7963 	.match = pseudo_lld_bus_match,
7964 	.probe = sdebug_driver_probe,
7965 	.remove = sdebug_driver_remove,
7966 	.drv_groups = sdebug_drv_groups,
7967 };
7968