#ifndef S390_DEVICE_H
#define S390_DEVICE_H

#include <asm/ccwdev.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
#include "io_sch.h"

/*
 * States of the device state machine.
 */
enum dev_state {
	DEV_STATE_NOT_OPER,
	DEV_STATE_SENSE_PGID,
	DEV_STATE_SENSE_ID,
	DEV_STATE_OFFLINE,
	DEV_STATE_VERIFY,
	DEV_STATE_ONLINE,
	DEV_STATE_W4SENSE,
	DEV_STATE_DISBAND_PGID,
	DEV_STATE_BOXED,
	/* states that wait for I/O completion before taking further action */
	DEV_STATE_TIMEOUT_KILL,
	DEV_STATE_QUIESCE,
	/* special states for devices that are no longer operational */
	DEV_STATE_DISCONNECTED,
	DEV_STATE_DISCONNECTED_SENSE_ID,
	DEV_STATE_CMFCHANGE,
	DEV_STATE_CMFUPDATE,
	DEV_STATE_STEAL_LOCK,
	/* last element! */
	NR_DEV_STATES
};

/*
 * Asynchronous events of the device state machine.
 */
enum dev_event {
	DEV_EVENT_NOTOPER,
	DEV_EVENT_INTERRUPT,
	DEV_EVENT_TIMEOUT,
	DEV_EVENT_VERIFY,
	/* last element! */
	NR_DEV_EVENTS
};

struct ccw_device;

/*
 * Per-state action, called through the jump table below.
 */
typedef void (fsm_func_t)(struct ccw_device *, enum dev_event);
extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS];
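
/*
 * Illustrative sketch only; ccw_device_example_handler is a made-up name.
 * Each entry of dev_jumptable is an fsm_func_t that reacts to one event
 * while in one state:
 *
 *	static void ccw_device_example_handler(struct ccw_device *cdev,
 *					       enum dev_event dev_event)
 *	{
 *		...
 *	}
 *
 * The table itself is statically initialized in device_fsm.c, e.g.
 *
 *	[DEV_STATE_ONLINE] = {
 *		[DEV_EVENT_INTERRUPT] = ccw_device_example_handler,
 *	},
 *
 * and dev_fsm_event() below selects the handler by the current state and
 * the delivered event.
 */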

/*
 * Deliver an event to the device state machine: account the interrupt
 * in the per-CPU irq statistics where applicable and call the handler
 * registered for the current state.
 */
static inline void
dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
{
	int state = cdev->private->state;

	if (dev_event == DEV_EVENT_INTERRUPT) {
		if (state == DEV_STATE_ONLINE)
			kstat_cpu(smp_processor_id()).
				irqs[cdev->private->int_class]++;
		else if (state != DEV_STATE_CMFCHANGE &&
			 state != DEV_STATE_CMFUPDATE)
			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
	}
	dev_jumptable[state][dev_event](cdev, dev_event);
}

/*
 * Returns 1 if the device is in a final state, i.e. not operational,
 * offline, online or boxed.
 */
static inline int
dev_fsm_final_state(struct ccw_device *cdev)
{
	return (cdev->private->state == DEV_STATE_NOT_OPER ||
		cdev->private->state == DEV_STATE_OFFLINE ||
		cdev->private->state == DEV_STATE_ONLINE ||
		cdev->private->state == DEV_STATE_BOXED);
}
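
/*
 * Typical usage (sketch, assuming the wait_q member of the private data
 * declared in io_sch.h): callers that kick off a state transition wait
 * for the FSM to settle in a final state, e.g.
 *
 *	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
 */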

extern wait_queue_head_t ccw_device_init_wq;
extern atomic_t ccw_device_init_count;
int __init io_subchannel_init(void);

void io_subchannel_recog_done(struct ccw_device *cdev);
void io_subchannel_init_config(struct subchannel *sch);

int ccw_device_cancel_halt_clear(struct ccw_device *);

int ccw_device_is_orphan(struct ccw_device *);

void ccw_device_recognition(struct ccw_device *);
int ccw_device_online(struct ccw_device *);
int ccw_device_offline(struct ccw_device *);
void ccw_device_update_sense_data(struct ccw_device *);
int ccw_device_test_sense_data(struct ccw_device *);
void ccw_device_schedule_sch_unregister(struct ccw_device *);
int ccw_purge_blacklisted(void);
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);

/* Function prototypes for device status and basic sense handling. */
void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
int ccw_device_do_sense(struct ccw_device *, struct irb *);

/* Function prototypes for internal request handling. */
int lpm_adjust(int lpm, int mask);
void ccw_request_start(struct ccw_device *);
int ccw_request_cancel(struct ccw_device *cdev);
void ccw_request_handler(struct ccw_device *cdev);
void ccw_request_timeout(struct ccw_device *cdev);
void ccw_request_notoper(struct ccw_device *cdev);

/* Function prototypes for sense ID handling. */
void ccw_device_sense_id_start(struct ccw_device *);
void ccw_device_sense_id_done(struct ccw_device *, int);

/* Function prototypes for path grouping. */
void ccw_device_verify_start(struct ccw_device *);
void ccw_device_verify_done(struct ccw_device *, int);

void ccw_device_disband_start(struct ccw_device *);
void ccw_device_disband_done(struct ccw_device *, int);

void ccw_device_stlck_start(struct ccw_device *, void *, void *, void *);
void ccw_device_stlck_done(struct ccw_device *, void *, int);

int ccw_device_call_handler(struct ccw_device *);

int ccw_device_stlck(struct ccw_device *);

/* Helper functions for machine check handling. */
void ccw_device_trigger_reprobe(struct ccw_device *);
void ccw_device_kill_io(struct ccw_device *);
int ccw_device_notify(struct ccw_device *, int);
void ccw_device_set_disconnected(struct ccw_device *cdev);
void ccw_device_set_notoper(struct ccw_device *cdev);

/* qdio needs this. */
void ccw_device_set_timeout(struct ccw_device *, int);
extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);

/* Channel measurement facility related */
void retry_set_schib(struct ccw_device *cdev);
void cmf_retry_copy_block(struct ccw_device *);
int cmf_reenable(struct ccw_device *);
int ccw_set_cmf(struct ccw_device *cdev, int enable);
extern struct device_attribute dev_attr_cmb_enable;
#endif