/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "port.h"
#include "request.h"

#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT  (1000)
#define SCU_DUMMY_INDEX    (0xFFFF)

#undef C
#define C(a) (#a)
const char *port_state_name(enum sci_port_states state)
{
	static const char * const strings[] = PORT_STATES;

	return strings[state];
}
#undef C

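/*
 * Resolve the struct device used for logging from a port pointer.  Ports are
 * elements of the owning controller's ports[] array, so stepping back by the
 * port's index recovers the first element and container_of() then yields the
 * isci_host.  The dummy port index is first mapped to a fixed slot past the
 * real ports before doing the arithmetic.
 */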
static struct device *sciport_to_dev(struct isci_port *iport)
{
	int i = iport->physical_port_index;
	struct isci_port *table;
	struct isci_host *ihost;

	if (i == SCIC_SDS_DUMMY_PORT)
		i = SCI_MAX_PORTS+1;

	table = iport - i;
	ihost = container_of(table, typeof(*ihost), ports[0]);

	return &ihost->pdev->dev;
}

static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
{
	u8 index;

	proto->all = 0;
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		struct isci_phy *iphy = iport->phy_table[index];

		if (!iphy)
			continue;
		sci_phy_get_protocols(iphy, proto);
	}
}

static u32 sci_port_get_phys(struct isci_port *iport)
{
	u32 index;
	u32 mask;

	mask = 0;
	for (index = 0; index < SCI_MAX_PHYS; index++)
		if (iport->phy_table[index])
			mask |= (1 << index);

	return mask;
}

/**
 * sci_port_get_properties() - This method simply returns the properties
 *    regarding the port, such as: physical index, protocols, sas address, etc.
 * @port: this parameter specifies the port for which to retrieve the physical
 *    index.
 * @properties: This parameter specifies the properties structure into which to
 *    copy the requested information.
 *
 * Return: SCI_SUCCESS if the specified port is valid.
 * SCI_FAILURE_INVALID_PORT if the specified port is not valid; in that case
 * no data is copied to the properties output parameter.
 */
enum sci_status sci_port_get_properties(struct isci_port *iport,
						struct sci_port_properties *prop)
{
	if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
		return SCI_FAILURE_INVALID_PORT;

	prop->index = iport->logical_port_index;
	prop->phy_mask = sci_port_get_phys(iport);
	sci_port_get_sas_address(iport, &prop->local.sas_address);
	sci_port_get_protocols(iport, &prop->local.protocols);
	sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);

	return SCI_SUCCESS;
}

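/*
 * Re-enable broadcast change notifications on every phy in the port.  The
 * relevant bit in the link layer control register is cleared by writing a 1,
 * so reading the register and writing the same value back is enough to
 * re-arm BCN reporting.
 */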
static void sci_port_bcn_enable(struct isci_port *iport)
{
	struct isci_phy *iphy;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
		iphy = iport->phy_table[i];
		if (!iphy)
			continue;
		val = readl(&iphy->link_layer_registers->link_layer_control);
		/* clear the bit by writing 1. */
		writel(val, &iphy->link_layer_registers->link_layer_control);
	}
}

static void isci_port_bc_change_received(struct isci_host *ihost,
					 struct isci_port *iport,
					 struct isci_phy *iphy)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_phy = %p, sas_phy = %p\n",
		__func__, iphy, &iphy->sas_phy);

	ihost->sas_ha.notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD);
	sci_port_bcn_enable(iport);
}

static void isci_port_link_up(struct isci_host *isci_host,
			      struct isci_port *iport,
			      struct isci_phy *iphy)
{
	unsigned long flags;
	struct sci_port_properties properties;
	unsigned long success = true;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p\n",
		__func__, iport);

	spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);

	sci_port_get_properties(iport, &properties);

	if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
		u64 attached_sas_address;

		iphy->sas_phy.oob_mode = SATA_OOB_MODE;
		iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis);

		/*
		 * For direct-attached SATA devices, the SCI core will
		 * automagically assign a SAS address to the end device
		 * for the purpose of creating a port. This SAS address
		 * will not be the same as assigned to the PHY and needs
		 * to be obtained from struct sci_port_properties properties.
		 */
		attached_sas_address = properties.remote.sas_address.high;
		attached_sas_address <<= 32;
		attached_sas_address |= properties.remote.sas_address.low;
		swab64s(&attached_sas_address);

		memcpy(&iphy->sas_phy.attached_sas_addr,
		       &attached_sas_address, sizeof(attached_sas_address));
	} else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
		iphy->sas_phy.oob_mode = SAS_OOB_MODE;
		iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);

		/* Copy the attached SAS address from the IAF */
		memcpy(iphy->sas_phy.attached_sas_addr,
		       iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE);
	} else {
		dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__);
		success = false;
	}

	iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy);

	spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);

	/* Notify libsas that we have an address frame, if indeed
	 * we've found an SSP, SMP, or STP target */
	if (success)
		isci_host->sas_ha.notify_port_event(&iphy->sas_phy,
						    PORTE_BYTES_DMAED);
}


/**
 * isci_port_link_down() - This function is called by the sci core when a link
 *    becomes inactive.
 * @isci_host: This parameter specifies the isci host object.
 * @phy: This parameter specifies the isci phy with the active link.
 * @port: This parameter specifies the isci port with the active link.
 *
 */
static void isci_port_link_down(struct isci_host *isci_host,
				struct isci_phy *isci_phy,
				struct isci_port *isci_port)
{
	struct isci_remote_device *isci_device;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p\n", __func__, isci_port);

	if (isci_port) {

		/* check to see if this is the last phy on this port. */
		if (isci_phy->sas_phy.port &&
		    isci_phy->sas_phy.port->num_phys == 1) {
			/* change the state for all devices on this port.  The
			 * next task sent to this device will be returned as
			 * SAS_TASK_UNDELIVERED, and the scsi mid layer will
			 * remove the target
			 */
			list_for_each_entry(isci_device,
					    &isci_port->remote_dev_list,
					    node) {
				dev_dbg(&isci_host->pdev->dev,
					"%s: isci_device = %p\n",
					__func__, isci_device);
				set_bit(IDEV_GONE, &isci_device->flags);
			}
		}
	}

	/* Notify libsas of the broken link, this will trigger calls to our
	 * isci_port_deformed and isci_dev_gone functions.
	 */
	sas_phy_disconnected(&isci_phy->sas_phy);
	isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
					   PHYE_LOSS_OF_SIGNAL);

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p - Done\n", __func__, isci_port);
}

static bool is_port_ready_state(enum sci_port_states state)
{
	switch (state) {
	case SCI_PORT_READY:
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
	case SCI_PORT_SUB_CONFIGURING:
		return true;
	default:
		return false;
	}
}

/* flag dummy rnc handling when exiting a ready state */
static void port_state_machine_change(struct isci_port *iport,
				      enum sci_port_states state)
{
	struct sci_base_state_machine *sm = &iport->sm;
	enum sci_port_states old_state = sm->current_state_id;

	if (is_port_ready_state(old_state) && !is_port_ready_state(state))
		iport->ready_exit = true;

	sci_change_state(sm, state);
	iport->ready_exit = false;
}

/**
 * isci_port_hard_reset_complete() - This function is called by the sci core
 *    when the hard reset complete notification has been received.
 * @port: This parameter specifies the sci port with the active link.
 * @completion_status: This parameter specifies the core status for the reset
 *    process.
 *
 */
static void isci_port_hard_reset_complete(struct isci_port *isci_port,
					  enum sci_status completion_status)
{
	struct isci_host *ihost = isci_port->owning_controller;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_port = %p, completion_status=%x\n",
		     __func__, isci_port, completion_status);

	/* Save the status of the hard reset from the port. */
	isci_port->hard_reset_status = completion_status;

	if (completion_status != SCI_SUCCESS) {

		/* The reset failed.  The port state is now SCI_PORT_FAILED. */
		if (isci_port->active_phy_mask == 0) {
			int phy_idx = isci_port->last_active_phy;
			struct isci_phy *iphy = &ihost->phys[phy_idx];

			/* Generate the link down now to the host, since it
			 * was intercepted by the hard reset state machine when
			 * it really happened.
			 */
			isci_port_link_down(ihost, iphy, isci_port);
		}
		/* Advance the port state so that link state changes will be
		 * noticed.
		 */
		port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING);

	}
	clear_bit(IPORT_RESET_PENDING, &isci_port->state);
	wake_up(&ihost->eventq);

}

/* This method will return a true value if the specified phy can be assigned to
 * this port.  The following is a list of phys for each port that are allowed:
 *   Port 0 - 3 2 1 0
 *   Port 1 -       1
 *   Port 2 - 3 2
 *   Port 3 - 3
 * This method doesn't preclude all configurations.  It merely ensures that a
 * phy is part of the allowable set of phy identifiers for that port.  For
 * example, one could assign phy 3 to port 0 and no other phys.  Please refer
 * to sci_port_is_phy_mask_valid() for information regarding whether the
 * phy_mask for a port can be supported.
 *
 * Returns true if this is a valid phy assignment for the port, false if it
 * is not.
 */
bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
{
	struct isci_host *ihost = iport->owning_controller;
	struct sci_user_parameters *user = &ihost->user_parameters;

	/* Initialize to invalid value. */
	u32 existing_phy_index = SCI_MAX_PHYS;
	u32 index;

	if ((iport->physical_port_index == 1) && (phy_index != 1))
		return false;

	if (iport->physical_port_index == 3 && phy_index != 3)
		return false;

	if (iport->physical_port_index == 2 &&
	    (phy_index == 0 || phy_index == 1))
		return false;

	for (index = 0; index < SCI_MAX_PHYS; index++)
		if (iport->phy_table[index] && index != phy_index)
			existing_phy_index = index;

	/* Ensure that all of the phys in the port are capable of
	 * operating at the same maximum link rate.
	 */
	if (existing_phy_index < SCI_MAX_PHYS &&
	    user->phys[phy_index].max_speed_generation !=
	    user->phys[existing_phy_index].max_speed_generation)
		return false;

	return true;
}

/**
 * sci_port_is_phy_mask_valid() - determine if a phy mask can be supported
 * @sci_port: This is the port object for which to determine if the phy mask
 *    can be supported.
 * @phy_mask: the phy mask to check.
 *
 * This method will return a true value if the port's phy mask can be supported
 * by the SCU.  The following is a list of valid PHY mask configurations for
 * each port:
 *   Port 0 - [[3  2] 1] 0
 *   Port 1 -        [1]
 *   Port 2 - [[3] 2]
 *   Port 3 -  [3]
 *
 * Returns true if this is a valid phy mask for the port, false if it is not.
 */
static bool sci_port_is_phy_mask_valid(
	struct isci_port *iport,
	u32 phy_mask)
{
	if (iport->physical_port_index == 0) {
		if (((phy_mask & 0x0F) == 0x0F)
		    || ((phy_mask & 0x03) == 0x03)
		    || ((phy_mask & 0x01) == 0x01)
		    || (phy_mask == 0))
			return true;
	} else if (iport->physical_port_index == 1) {
		if (((phy_mask & 0x02) == 0x02)
		    || (phy_mask == 0))
			return true;
	} else if (iport->physical_port_index == 2) {
		if (((phy_mask & 0x0C) == 0x0C)
		    || ((phy_mask & 0x04) == 0x04)
		    || (phy_mask == 0))
			return true;
	} else if (iport->physical_port_index == 3) {
		if (((phy_mask & 0x08) == 0x08)
		    || (phy_mask == 0))
			return true;
	}

	return false;
}

/*
 * This method retrieves a currently active (i.e. connected) phy contained in
 * the port.  Currently, the lowest order phy that is connected is returned.
 * This method returns a pointer to the connected struct isci_phy object.
 * NULL is returned if there are no currently active (i.e. connected to a
 * remote end point) phys contained in the port.
 */
static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
{
	u32 index;
	struct isci_phy *iphy;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		/* Ensure that the phy is both part of the port and currently
		 * connected to the remote end-point.
		 */
		iphy = iport->phy_table[index];
		if (iphy && sci_port_active_phy(iport, iphy))
			return iphy;
	}

	return NULL;
}

static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
{
	/* Check to see if we can add this phy to the port: the phy must not
	 * already be part of a port, and the port must not already have a phy
	 * assigned to this phy index.
	 */
	if (!iport->phy_table[iphy->phy_index] &&
	    !phy_get_non_dummy_port(iphy) &&
	    sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
		/* Phy is being added in the stopped state so we are in MPC mode
		 * make logical port index = physical port index
		 */
		iport->logical_port_index = iport->physical_port_index;
		iport->phy_table[iphy->phy_index] = iphy;
		sci_phy_set_port(iphy, iport);

		return SCI_SUCCESS;
	}

	return SCI_FAILURE;
}

static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
{
	/* Make sure that this phy is part of this port */
	if (iport->phy_table[iphy->phy_index] == iphy &&
	    phy_get_non_dummy_port(iphy) == iport) {
		struct isci_host *ihost = iport->owning_controller;

		/* Yep it is assigned to this port so remove it */
		sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
		iport->phy_table[iphy->phy_index] = NULL;
		return SCI_SUCCESS;
	}

	return SCI_FAILURE;
}

void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
{
	u32 index;

	sas->high = 0;
	sas->low  = 0;
	for (index = 0; index < SCI_MAX_PHYS; index++)
		if (iport->phy_table[index])
			sci_phy_get_sas_address(iport->phy_table[index], sas);
}

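/*
 * Report the SAS address of whatever is attached to this port.  For SAS the
 * address comes from the attached device's IDENTIFY address frame; for a
 * direct-attached SATA device no such address exists, so a unique one is
 * synthesized from the local phy's SAS address plus the phy index (the same
 * address the core assigned when it formed the port).
 */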
void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
{
	struct isci_phy *iphy;

	/*
	 * Ensure that the phy is both part of the port and currently
	 * connected to the remote end-point.
	 */
	iphy = sci_port_get_a_connected_phy(iport);
	if (iphy) {
		if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) {
			sci_phy_get_attached_sas_address(iphy, sas);
		} else {
			sci_phy_get_sas_address(iphy, sas);
			sas->low += iphy->phy_index;
		}
	} else {
		sas->high = 0;
		sas->low  = 0;
	}
}

/**
 * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
 *
 * @sci_port: logical port on which we need to create the remote node context
 * @rni: remote node index for this remote node context.
 *
 * This routine will construct a dummy remote node context data structure.
 * This structure will be posted to the hardware to work around a scheduler
 * error in the hardware.
 */
static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
{
	union scu_remote_node_context *rnc;

	rnc = &iport->owning_controller->remote_node_context_table[rni];

	memset(rnc, 0, sizeof(union scu_remote_node_context));

	rnc->ssp.remote_sas_address_hi = 0;
	rnc->ssp.remote_sas_address_lo = 0;

	rnc->ssp.remote_node_index = rni;
	rnc->ssp.remote_node_port_width = 1;
	rnc->ssp.logical_port_index = iport->physical_port_index;

	rnc->ssp.nexus_loss_timer_enable = false;
	rnc->ssp.check_bit = false;
	rnc->ssp.is_valid = true;
	rnc->ssp.is_remote_node_context = true;
	rnc->ssp.function_number = 0;
	rnc->ssp.arbitration_wait_time = 0;
}

/*
 * construct a dummy task context data structure.  This structure will be
 * posted to the hardware to work around a scheduler error in the hardware.
 */
static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
{
	struct isci_host *ihost = iport->owning_controller;
	struct scu_task_context *task_context;

	task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
	memset(task_context, 0, sizeof(struct scu_task_context));

	task_context->initiator_request = 1;
	task_context->connection_rate = 1;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->task_index = ISCI_TAG_TCI(tag);
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
	task_context->remote_node_index = iport->reserved_rni;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->task_phase = 0x01;
}

static void sci_port_destroy_dummy_resources(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;

	if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
		isci_free_tag(ihost, iport->reserved_tag);

	if (iport->reserved_rni != SCU_DUMMY_INDEX)
		sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
								     1, iport->reserved_rni);

	iport->reserved_rni = SCU_DUMMY_INDEX;
	iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
}

void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
{
	u8 index;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		if (iport->active_phy_mask & (1 << index))
			sci_phy_setup_transport(iport->phy_table[index], device_id);
	}
}

static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
{
	sci_phy_resume(iphy);
	iport->enabled_phy_mask |= 1 << iphy->phy_index;
}

static void sci_port_activate_phy(struct isci_port *iport,
				  struct isci_phy *iphy,
				  u8 flags)
{
	struct isci_host *ihost = iport->owning_controller;

	if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME))
		sci_phy_resume(iphy);

	iport->active_phy_mask |= 1 << iphy->phy_index;

	sci_controller_clear_invalid_phy(ihost, iphy);

	if (flags & PF_NOTIFY)
		isci_port_link_up(ihost, iport, iphy);
}

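/*
 * Remove a phy from the port's active and enabled masks, remembering the
 * last active phy so a synthetic link-down can be reported later if needed.
 * In APC mode the phy's port-engine assignment is handed back to the phy's
 * own index, as if it were a narrow port; in MPC mode the phy stays with the
 * port.  Optionally notifies libsas of the link-down.
 */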
void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
			     bool do_notify_user)
{
	struct isci_host *ihost = iport->owning_controller;

	iport->active_phy_mask &= ~(1 << iphy->phy_index);
	iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
	if (!iport->active_phy_mask)
		iport->last_active_phy = iphy->phy_index;

	iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;

	/* Re-assign the phy back to the LP as if it were a narrow port for APC
	 * mode. For MPC mode, the phy will remain in the port.
	 */
	if (iport->owning_controller->oem_parameters.controller.mode_type ==
		SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
		writel(iphy->phy_index,
			&iport->port_pe_configuration_register[iphy->phy_index]);

	if (do_notify_user == true)
		isci_port_link_down(ihost, iphy, iport);
}

static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
{
	struct isci_host *ihost = iport->owning_controller;

	/*
	 * Check to see if we have already reported this link as bad and, if
	 * not, go ahead and tell the SCI_USER that we have discovered an
	 * invalid link.
	 */
	if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
		ihost->invalid_phy_mask |= 1 << iphy->phy_index;
		dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
	}
}

/**
 * sci_port_general_link_up_handler - phy can be assigned to port?
 * @sci_port: sci_port object for which has a phy that has gone link up.
 * @sci_phy: This is the struct isci_phy object that has gone link up.
 * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
 *
 * Determine if this phy can be assigned to this port.  If the phy is
 * not a valid PHY for this port then the function will notify the user.
 * A PHY can only be part of a port if its attached SAS ADDRESS is the
 * same as all other PHYs in the same port.
 */
static void sci_port_general_link_up_handler(struct isci_port *iport,
					     struct isci_phy *iphy,
					     u8 flags)
{
	struct sci_sas_address port_sas_address;
	struct sci_sas_address phy_sas_address;

	sci_port_get_attached_sas_address(iport, &port_sas_address);
	sci_phy_get_attached_sas_address(iphy, &phy_sas_address);

	/* If the SAS address of the new phy matches the SAS address of
	 * other phys in the port OR this is the first phy in the port,
	 * then activate the phy and allow it to be used for operations
	 * in this port.
	 */
	if ((phy_sas_address.high == port_sas_address.high &&
	     phy_sas_address.low  == port_sas_address.low) ||
	    iport->active_phy_mask == 0) {
		struct sci_base_state_machine *sm = &iport->sm;

		sci_port_activate_phy(iport, iphy, flags);
		if (sm->current_state_id == SCI_PORT_RESETTING)
			port_state_machine_change(iport, SCI_PORT_READY);
	} else
		sci_port_invalid_link_up(iport, iphy);
}



/**
 * sci_port_is_wide() - determine if the port is a wide port
 * @sci_port: The port for which the wide port condition is to be checked.
 *
 * This method returns false if the port has exactly one phy object assigned
 * to it.  If there are no phys or more than one phy then the method will
 * return true.
 */
static bool sci_port_is_wide(struct isci_port *iport)
{
	u32 index;
	u32 phy_count = 0;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		if (iport->phy_table[index] != NULL) {
			phy_count++;
		}
	}

	return phy_count != 1;
}

/**
 * sci_port_link_detected() - link-detected notification from the PHY
 * @sci_port: The port associated with the phy object.
 * @sci_phy: The phy object that is trying to go link up.
 *
 * This method is called by the PHY object when the link is detected.  If the
 * port wants the PHY to continue on to the link up state then the port layer
 * must return true.  If the port object returns false the phy object must
 * halt its attempt to go link up.
 *
 * Returns true if this phy can continue to the ready state, false if it can
 * not.  This notification is in place for wide ports and direct attached
 * phys.  Since there are no wide ported SATA devices this could become an
 * invalid port configuration.
 */
bool sci_port_link_detected(
	struct isci_port *iport,
	struct isci_phy *iphy)
{
	if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
	    (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) {
		if (sci_port_is_wide(iport)) {
			sci_port_invalid_link_up(iport, iphy);
			return false;
		} else {
			struct isci_host *ihost = iport->owning_controller;
			struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);
			writel(iphy->phy_index,
			       &dst_port->port_pe_configuration_register[iphy->phy_index]);
		}
	}

	return true;
}

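/*
 * Timer callback for the port hard-reset/stop timeout.  Runs under the
 * controller lock; if the port is still RESETTING the reset is declared
 * failed and the port moves to SCI_PORT_FAILED, otherwise the timeout is
 * only logged.
 */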
static void port_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
	struct isci_host *ihost = iport->owning_controller;
	unsigned long flags;
	u32 current_state;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	current_state = iport->sm.current_state_id;

	if (current_state == SCI_PORT_RESETTING) {
		/* if the port is still in the resetting state then the timeout
		 * fired before the reset completed.
		 */
		port_state_machine_change(iport, SCI_PORT_FAILED);
	} else if (current_state == SCI_PORT_STOPPED) {
		/* if the port is stopped then the start request failed.  In
		 * this case stay in the stopped state.
		 */
		dev_err(sciport_to_dev(iport),
			"%s: SCIC Port 0x%p failed to stop before timeout.\n",
			__func__,
			iport);
	} else if (current_state == SCI_PORT_STOPPING) {
		dev_dbg(sciport_to_dev(iport),
			"%s: port%d: stop complete timeout\n",
			__func__, iport->physical_port_index);
	} else {
		/* The port is in the ready state and we have a timer
		 * reporting a timeout; this should not happen.
		 */
		dev_err(sciport_to_dev(iport),
			"%s: SCIC Port 0x%p is processing a timeout operation "
			"in state %d.\n", __func__, iport, current_state);
	}

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

/* --------------------------------------------------------------------------- */

/**
 * sci_port_update_viit_entry() - update the hardware's VIIT entry for this
 *    port.
 */
static void sci_port_update_viit_entry(struct isci_port *iport)
{
	struct sci_sas_address sas_address;

	sci_port_get_sas_address(iport, &sas_address);

	writel(sas_address.high,
		&iport->viit_registers->initiator_sas_address_hi);
	writel(sas_address.low,
		&iport->viit_registers->initiator_sas_address_lo);

	/* This value gets cleared just in case it's not already cleared */
	writel(0, &iport->viit_registers->reserved);

	/* We are required to update the status register last */
	writel(SCU_VIIT_ENTRY_ID_VIIT |
	       SCU_VIIT_IPPT_INITIATOR |
	       ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
	       SCU_VIIT_STATUS_ALL_VALID,
	       &iport->viit_registers->status);
}

enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
{
	u16 index;
	struct isci_phy *iphy;
	enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS;

	/*
	 * Loop through all of the phys in this port and find the phy with the
	 * lowest maximum link rate. */
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		iphy = iport->phy_table[index];
		if (iphy && sci_port_active_phy(iport, iphy) &&
		    iphy->max_negotiated_speed < max_allowed_speed)
			max_allowed_speed = iphy->max_negotiated_speed;
	}

	return max_allowed_speed;
}

static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND);
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}

/**
 * sci_port_post_dummy_request() - post dummy/workaround request
 * @sci_port: port to post task
 *
 * Prevent the hardware scheduler from posting new requests to the front
 * of the scheduler queue causing a starvation problem for currently
 * ongoing requests.
 *
 */
static void sci_port_post_dummy_request(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u16 tag = iport->reserved_tag;
	struct scu_task_context *tc;
	u32 command;

	tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
	tc->abort = 0;

	command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
		  iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
		  ISCI_TAG_TCI(tag);

	sci_controller_post_request(ihost, command);
}

/**
 * sci_port_abort_dummy_request() - abort the dummy request
 * @sci_port: The port on which the task must be aborted.
 *
 * This routine will abort the dummy request.  This will allow the hardware
 * to power down parts of the silicon to save power.
 */
static void sci_port_abort_dummy_request(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u16 tag = iport->reserved_tag;
	struct scu_task_context *tc;
	u32 command;

	tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
	tc->abort = 1;

	command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
		  iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
		  ISCI_TAG_TCI(tag);

	sci_controller_post_request(ihost, command);
}

/**
 * sci_port_resume_port_task_scheduler() - resume the port task scheduler
 * @sci_port: This is the struct isci_port object to resume.
 *
 * This method will resume the port task scheduler for this port object.
 */
static void
sci_port_resume_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND);
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}

static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	sci_port_suspend_port_task_scheduler(iport);

	iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;

	if (iport->active_phy_mask != 0) {
		/* At least one of the phys on the port is ready */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_OPERATIONAL);
	}
}

static void scic_sds_port_ready_substate_waiting_exit(
					struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	sci_port_resume_port_task_scheduler(iport);
}

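/*
 * SCI_PORT_SUB_OPERATIONAL entry: bind every phy in the port to this port in
 * the PE configuration registers, resume any phy that is active but not yet
 * enabled (the active/enabled mask XOR), refresh the VIIT entry, and post
 * the dummy task so the hardware scheduler behaves correctly.
 */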
static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
{
	u32 index;
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	dev_dbg(&ihost->pdev->dev, "%s: port%d ready\n",
		__func__, iport->physical_port_index);

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		if (iport->phy_table[index]) {
			writel(iport->physical_port_index,
				&iport->port_pe_configuration_register[
					iport->phy_table[index]->phy_index]);
			if (((iport->active_phy_mask^iport->enabled_phy_mask) & (1 << index)) != 0)
				sci_port_resume_phy(iport, iport->phy_table[index]);
		}
	}

	sci_port_update_viit_entry(iport);

	/*
	 * Post the dummy task for the port so the hardware can schedule
	 * io correctly
	 */
	sci_port_post_dummy_request(iport);
}

static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u8 phys_index = iport->physical_port_index;
	union scu_remote_node_context *rnc;
	u16 rni = iport->reserved_rni;
	u32 command;

	rnc = &ihost->remote_node_context_table[rni];

	rnc->ssp.is_valid = false;

	/* ensure the preceding tc abort request has reached the
	 * controller and give it ample time to act before posting the rnc
	 * invalidate
	 */
	readl(&ihost->smu_registers->interrupt_status); /* flush */
	udelay(10);

	command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);
}

/**
 * sci_port_ready_substate_operational_exit() - exit the operational substate
 * @object: This is the object which is cast to a struct isci_port object.
 *
 * This method will perform the actions required by the struct isci_port on
 * exiting SCI_PORT_SUB_OPERATIONAL.  This function reports the port not
 * ready and suspends the port task scheduler.
 */
static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	/*
	 * Kill the dummy task for this port; if it has not yet posted,
	 * the hardware will treat this as a NOP and just return abort
	 * complete.
	 */
	sci_port_abort_dummy_request(iport);

	dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
		__func__, iport->physical_port_index);

	if (iport->ready_exit)
		sci_port_invalidate_dummy_remote_node(iport);
}

static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	if (iport->active_phy_mask == 0) {
		dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
			__func__, iport->physical_port_index);

		port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
	} else
		port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
}

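/*
 * Start a stopped port: reserve the dummy remote node context and dummy task
 * needed for the silicon workaround, then transition to SCI_PORT_READY if
 * the currently assigned phy mask is one the SCU supports.  Any reserved
 * resources are released again on failure.
 */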
enum sci_status sci_port_start(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	enum sci_status status = SCI_SUCCESS;
	enum sci_port_states state;
	u32 phy_mask;

	state = iport->sm.current_state_id;
	if (state != SCI_PORT_STOPPED) {
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	if (iport->assigned_device_count > 0) {
		/* TODO This is a start failure operation because
		 * there are still devices assigned to this port.
		 * There must be no devices assigned to a port on a
		 * start operation.
		 */
		return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
	}

	if (iport->reserved_rni == SCU_DUMMY_INDEX) {
		u16 rni = sci_remote_node_table_allocate_remote_node(
				&ihost->available_remote_nodes, 1);

		if (rni != SCU_DUMMY_INDEX)
			sci_port_construct_dummy_rnc(iport, rni);
		else
			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
		iport->reserved_rni = rni;
	}

	if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
		u16 tag;

		tag = isci_alloc_tag(ihost);
		if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
		else
			sci_port_construct_dummy_task(iport, tag);
		iport->reserved_tag = tag;
	}

	if (status == SCI_SUCCESS) {
		phy_mask = sci_port_get_phys(iport);

		/*
		 * There are one or more phys assigned to this port.  Make sure
		 * the port's phy mask is in fact legal and supported by the
		 * silicon.
		 */
		if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) {
			port_state_machine_change(iport,
						  SCI_PORT_READY);

			return SCI_SUCCESS;
		}
		status = SCI_FAILURE;
	}

	if (status != SCI_SUCCESS)
		sci_port_destroy_dummy_resources(iport);

	return status;
}

enum sci_status sci_port_stop(struct isci_port *iport)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED:
		return SCI_SUCCESS;
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
	case SCI_PORT_SUB_CONFIGURING:
	case SCI_PORT_RESETTING:
		port_state_machine_change(iport,
					  SCI_PORT_STOPPING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

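/*
 * Issue a hard reset on one of the port's connected phys.  The first active
 * phy found carries the reset; the port then arms its timer with the
 * caller's timeout and moves to SCI_PORT_RESETTING until the reset completes
 * or the timer fires.
 */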
static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
{
	enum sci_status status = SCI_FAILURE_INVALID_PHY;
	struct isci_phy *iphy = NULL;
	enum sci_port_states state;
	u32 phy_index;

	state = iport->sm.current_state_id;
	if (state != SCI_PORT_SUB_OPERATIONAL) {
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	/* Select a phy on which we can send the hard reset request. */
	for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) {
		iphy = iport->phy_table[phy_index];
		if (iphy && !sci_port_active_phy(iport, iphy)) {
			/*
			 * We found a phy but it is not ready, so select a
			 * different phy
			 */
			iphy = NULL;
		}
	}

	/* If we have a phy then go ahead and start the reset procedure */
	if (!iphy)
		return status;
	status = sci_phy_reset(iphy);

	if (status != SCI_SUCCESS)
		return status;

	sci_mod_timer(&iport->timer, timeout);
	iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;

	port_state_machine_change(iport, SCI_PORT_RESETTING);
	return SCI_SUCCESS;
}

/**
 * sci_port_add_phy() -
 * @sci_port: This parameter specifies the port in which the phy will be added.
 * @sci_phy: This parameter is the phy which is to be added to the port.
 *
 * This method will add a PHY to the selected port. This method returns an
 * enum sci_status. SCI_SUCCESS the phy has been added to the port. Any other
 * status is a failure to add the phy to the port.
 */
enum sci_status sci_port_add_phy(struct isci_port *iport,
				      struct isci_phy *iphy)
{
	enum sci_status status;
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED: {
		struct sci_sas_address port_sas_address;

		/* Read the port assigned SAS Address if there is one */
		sci_port_get_sas_address(iport, &port_sas_address);

		if (port_sas_address.high != 0 && port_sas_address.low != 0) {
			struct sci_sas_address phy_sas_address;

			/* Make sure that the PHY SAS Address matches the SAS Address
			 * for this port
			 */
			sci_phy_get_sas_address(iphy, &phy_sas_address);

			if (port_sas_address.high != phy_sas_address.high ||
			    port_sas_address.low  != phy_sas_address.low)
				return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
		}
		return sci_port_set_phy(iport, iphy);
	}
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
		status = sci_port_set_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;

		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
		port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);

		return status;
	case SCI_PORT_SUB_CONFIGURING:
		status = sci_port_set_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;
		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);

		/* Re-enter the configuring state since this may be the last phy in
		 * the port.
		 */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

/**
 * sci_port_remove_phy() -
 * @sci_port: This parameter specifies the port from which the phy will be
 *    removed.
 * @sci_phy: This parameter is the phy which is to be removed from the port.
 *
 * This method will remove the PHY from the selected PORT. This method returns
 * an enum sci_status. SCI_SUCCESS the phy has been removed from the port. Any
 * other status is a failure to remove the phy from the port.
 */
enum sci_status sci_port_remove_phy(struct isci_port *iport,
					 struct isci_phy *iphy)
{
	enum sci_status status;
	enum sci_port_states state;

	state = iport->sm.current_state_id;

	switch (state) {
	case SCI_PORT_STOPPED:
		return sci_port_clear_phy(iport, iphy);
	case SCI_PORT_SUB_OPERATIONAL:
		status = sci_port_clear_phy(iport, iphy);
		if (status != SCI_SUCCESS)
			return status;

		sci_port_deactivate_phy(iport, iphy, true);
		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	case SCI_PORT_SUB_CONFIGURING:
		status = sci_port_clear_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;
		sci_port_deactivate_phy(iport, iphy, true);

		/* Re-enter the configuring state since this may be the last phy in
		 * the port
		 */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_port_link_up(struct isci_port *iport,
				      struct isci_phy *iphy)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_SUB_WAITING:
		/* Since this is the first phy going link up for the port we
		 * can just enable it and continue
		 */
		sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);

		port_state_machine_change(iport,
					  SCI_PORT_SUB_OPERATIONAL);
		return SCI_SUCCESS;
	case SCI_PORT_SUB_OPERATIONAL:
		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
		return SCI_SUCCESS;
	case SCI_PORT_RESETTING:
		/* TODO We should make sure that the phy that has gone link up
		 * is the same one on which we sent the reset.  It is possible
		 * that the phy on which we sent the reset is not the one that
		 * has gone link up and we want to make sure that the phy
		 * being reset comes back.  Consider the case where a reset is
		 * sent but before the hardware processes the reset it gets a
		 * link up on the port because of a hot plug event.  Because
		 * of the reset request this phy will go link down almost
		 * immediately.
		 */

		/* In the resetting state we don't notify the user regarding
		 * link up and link down notifications.
		 */
		sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_port_link_down(struct isci_port *iport,
					struct isci_phy *iphy)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_SUB_OPERATIONAL:
		sci_port_deactivate_phy(iport, iphy, true);

		/* If there are no active phys left in the port, then
		 * transition the port to the WAITING state until such time
		 * as a phy goes link up
		 */
		if (iport->active_phy_mask == 0)
			port_state_machine_change(iport,
						  SCI_PORT_SUB_WAITING);
		return SCI_SUCCESS;
	case SCI_PORT_RESETTING:
		/* In the resetting state we don't notify the user regarding
		 * link up and link down notifications. */
		sci_port_deactivate_phy(iport, iphy, false);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_port_start_io(struct isci_port *iport,
				  struct isci_remote_device *idev,
				  struct isci_request *ireq)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_SUB_WAITING:
		return SCI_FAILURE_INVALID_STATE;
	case SCI_PORT_SUB_OPERATIONAL:
		iport->started_request_count++;
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_port_complete_io(struct isci_port *iport,
				     struct isci_remote_device *idev,
				     struct isci_request *ireq)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_PORT_STOPPING:
		sci_port_decrement_request_count(iport);

		if (iport->started_request_count == 0)
			port_state_machine_change(iport,
						  SCI_PORT_STOPPED);
		break;
	case SCI_PORT_READY:
	case SCI_PORT_RESETTING:
	case SCI_PORT_FAILED:
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
		sci_port_decrement_request_count(iport);
		break;
	case SCI_PORT_SUB_CONFIGURING:
		sci_port_decrement_request_count(iport);
		if (iport->started_request_count == 0) {
			port_state_machine_change(iport,
						  SCI_PORT_SUB_OPERATIONAL);
		}
		break;
	}
	return SCI_SUCCESS;
}

static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	 /* enable the port task scheduler in a suspended state */
	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}

static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value &=
		~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}

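/*
 * Post the dummy remote node context to the hardware and then immediately
 * suspend it.  The register read plus udelay between the two posts makes
 * sure the controller has consumed the first command before the suspend is
 * issued, similar to the teardown path in
 * sci_port_invalidate_dummy_remote_node().
 */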
sci_port_post_dummy_remote_node(struct isci_port * iport)1458 static void sci_port_post_dummy_remote_node(struct isci_port *iport)
1459 {
1460 	struct isci_host *ihost = iport->owning_controller;
1461 	u8 phys_index = iport->physical_port_index;
1462 	union scu_remote_node_context *rnc;
1463 	u16 rni = iport->reserved_rni;
1464 	u32 command;
1465 
1466 	rnc = &ihost->remote_node_context_table[rni];
1467 	rnc->ssp.is_valid = true;
1468 
1469 	command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
1470 		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
1471 
1472 	sci_controller_post_request(ihost, command);
1473 
1474 	/* ensure hardware has seen the post rnc command and give it
1475 	 * ample time to act before sending the suspend
1476 	 */
1477 	readl(&ihost->smu_registers->interrupt_status); /* flush */
1478 	udelay(10);
1479 
1480 	command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
1481 		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
1482 
1483 	sci_controller_post_request(ihost, command);
1484 }
1485 
static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	if (iport->sm.previous_state_id == SCI_PORT_STOPPING) {
		/*
		 * If we enter this state because of a request to stop
		 * the port, then we want to disable the hardware's port
		 * task scheduler.
		 */
		sci_port_disable_port_task_scheduler(iport);
	}
}

static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	/* Enable and suspend the port task scheduler */
	sci_port_enable_port_task_scheduler(iport);
}

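/*
 * SCI_PORT_READY entry: report hard-reset completion when arriving from
 * SCI_PORT_RESETTING, post the suspended dummy remote node context, and
 * drop into the ready substate machine at SCI_PORT_SUB_WAITING.
 */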
static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;
	u32 prev_state;

	prev_state = iport->sm.previous_state_id;
	if (prev_state == SCI_PORT_RESETTING)
		isci_port_hard_reset_complete(iport, SCI_SUCCESS);
	else
		dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
			__func__, iport->physical_port_index);

	/* Post and suspend the dummy remote node context for this port. */
	sci_port_post_dummy_remote_node(iport);

	/* Start the ready substate machine */
	port_state_machine_change(iport,
				  SCI_PORT_SUB_WAITING);
}

static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	sci_del_timer(&iport->timer);
}

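/*
 * SCI_PORT_STOPPING exit: the stop has completed, so cancel the port timer
 * and release the dummy remote node / task context resources that were
 * reserved for the port.
 */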
static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	sci_del_timer(&iport->timer);

	sci_port_destroy_dummy_resources(iport);
}

static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
}

/* --------------------------------------------------------------------------- */

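/*
 * State table binding each sci_port_states value to its entry/exit handlers;
 * states with no handler of a given kind simply leave that callback unset.
 */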
static const struct sci_base_state sci_port_state_table[] = {
	[SCI_PORT_STOPPED] = {
		.enter_state = sci_port_stopped_state_enter,
		.exit_state  = sci_port_stopped_state_exit
	},
	[SCI_PORT_STOPPING] = {
		.exit_state  = sci_port_stopping_state_exit
	},
	[SCI_PORT_READY] = {
		.enter_state = sci_port_ready_state_enter,
	},
	[SCI_PORT_SUB_WAITING] = {
		.enter_state = sci_port_ready_substate_waiting_enter,
		.exit_state  = scic_sds_port_ready_substate_waiting_exit,
	},
	[SCI_PORT_SUB_OPERATIONAL] = {
		.enter_state = sci_port_ready_substate_operational_enter,
		.exit_state  = sci_port_ready_substate_operational_exit
	},
	[SCI_PORT_SUB_CONFIGURING] = {
		.enter_state = sci_port_ready_substate_configuring_enter
	},
	[SCI_PORT_RESETTING] = {
		.exit_state  = sci_port_resetting_state_exit
	},
	[SCI_PORT_FAILED] = {
		.enter_state = sci_port_failed_state_enter,
	}
};

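/**
 * sci_port_construct() - initialize an isci_port to its default, stopped state
 * @iport: port object to construct
 * @index: physical port index assigned to this port
 * @ihost: controller that owns the port
 *
 * Starts the port state machine in SCI_PORT_STOPPED and clears the phy
 * table, request counts and reserved dummy resources; the logical port
 * index stays at SCIC_SDS_DUMMY_PORT until the port is configured.
 */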
void sci_port_construct(struct isci_port *iport, u8 index,
			     struct isci_host *ihost)
{
	sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);

	iport->logical_port_index  = SCIC_SDS_DUMMY_PORT;
	iport->physical_port_index = index;
	iport->active_phy_mask     = 0;
	iport->enabled_phy_mask    = 0;
	iport->last_active_phy     = 0;
	iport->ready_exit	   = false;

	iport->owning_controller = ihost;

	iport->started_request_count = 0;
	iport->assigned_device_count = 0;

	iport->reserved_rni = SCU_DUMMY_INDEX;
	iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;

	sci_init_timer(&iport->timer, port_timeout);

	iport->port_task_scheduler_registers = NULL;

	for (index = 0; index < SCI_MAX_PHYS; index++)
		iport->phy_table[index] = NULL;
}

void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
{
	INIT_LIST_HEAD(&iport->remote_dev_list);
	INIT_LIST_HEAD(&iport->domain_dev_list);
	iport->isci_host = ihost;
}

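/**
 * sci_port_broadcast_change_received() - forward a broadcast (change) primitive
 * @iport: port on which the broadcast (change) was received
 * @iphy: phy that received the primitive
 *
 * Simply hands the notification up to the isci layer via
 * isci_port_bc_change_received().
 */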
void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
{
	struct isci_host *ihost = iport->owning_controller;

	/* notify the user. */
	isci_port_bc_change_received(ihost, iport, iphy);
}

static void wait_port_reset(struct isci_host *ihost, struct isci_port *iport)
{
	wait_event(ihost->eventq, !test_bit(IPORT_RESET_PENDING, &iport->state));
}

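/**
 * isci_port_perform_hard_reset() - issue a hard reset on a port and wait
 * @ihost: controller that owns the port
 * @iport: port to hard reset
 * @iphy: phy associated with the reset request (unused in this path)
 *
 * Marks the port with IPORT_RESET_PENDING, starts the reset under the scic
 * lock and then blocks until the reset completes.
 *
 * Return: TMF_RESP_FUNC_COMPLETE on success, TMF_RESP_FUNC_FAILED if either
 * the reset could not be started or the completion status indicates failure.
 */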
int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
				 struct isci_phy *iphy)
{
	unsigned long flags;
	enum sci_status status;
	int ret = TMF_RESP_FUNC_COMPLETE;

	dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
		__func__, iport);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	set_bit(IPORT_RESET_PENDING, &iport->state);

	#define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
	status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);

	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (status == SCI_SUCCESS) {
		wait_port_reset(ihost, iport);

		dev_dbg(&ihost->pdev->dev,
			"%s: iport = %p; hard reset completion\n",
			__func__, iport);

		if (iport->hard_reset_status != SCI_SUCCESS) {
			ret = TMF_RESP_FUNC_FAILED;

			dev_err(&ihost->pdev->dev,
				"%s: iport = %p; hard reset failed (0x%x)\n",
				__func__, iport, iport->hard_reset_status);
		}
	} else {
		clear_bit(IPORT_RESET_PENDING, &iport->state);
		wake_up(&ihost->eventq);
		ret = TMF_RESP_FUNC_FAILED;

		dev_err(&ihost->pdev->dev,
			"%s: iport = %p; sci_port_hard_reset call failed 0x%x\n",
			__func__, iport, status);
	}

	/* If the hard reset for the port has failed, consider this
	 * the same as link failures on all phys in the port.
	 */
	if (ret != TMF_RESP_FUNC_COMPLETE) {
		dev_err(&ihost->pdev->dev,
			"%s: iport = %p; hard reset failed (0x%x) - driving explicit link fail for all phys\n",
			__func__, iport, iport->hard_reset_status);
	}
	return ret;
}

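/**
 * isci_ata_check_ready() - report whether an ATA device's port is link-ready
 * @dev: libsas domain device being checked
 *
 * Return: 1 if the device is known, no port reset is pending and at least
 * one phy in the port is active; 0 otherwise.
 */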
int isci_ata_check_ready(struct domain_device *dev)
{
	struct isci_port *iport = dev->port->lldd_port;
	struct isci_host *ihost = dev_to_ihost(dev);
	struct isci_remote_device *idev;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev = isci_lookup_device(dev);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (!idev)
		goto out;

	if (test_bit(IPORT_RESET_PENDING, &iport->state))
		goto out;

	rc = !!iport->active_phy_mask;
 out:
	isci_put_device(idev);

	return rc;
}

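/**
 * isci_port_deformed() - libsas hook called when a port is torn down
 * @phy: phy whose libsas port is being deformed
 *
 * Tolerates stale notifications for ports the driver has already torn down,
 * and only emits a debug message once no phy in the port remains active.
 */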
void isci_port_deformed(struct asd_sas_phy *phy)
{
	struct isci_host *ihost = phy->ha->lldd_ha;
	struct isci_port *iport = phy->port->lldd_port;
	unsigned long flags;
	int i;

	/* we got a port notification on a port that was subsequently
	 * torn down and libsas is just now catching up
	 */
	if (!iport)
		return;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		if (iport->active_phy_mask & 1 << i)
			break;
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (i >= SCI_MAX_PHYS)
		dev_dbg(&ihost->pdev->dev, "%s: port: %ld\n",
			__func__, (long) (iport - &ihost->ports[0]));
}

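/**
 * isci_port_formed() - libsas hook called when a new port has formed
 * @phy: phy that triggered port formation
 *
 * Waits for driver start-up to finish, then looks up the isci_port whose
 * active phy mask contains this phy and attaches it to the libsas port via
 * lldd_port (or NULL if no matching port is found).
 */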
void isci_port_formed(struct asd_sas_phy *phy)
{
	struct isci_host *ihost = phy->ha->lldd_ha;
	struct isci_phy *iphy = to_iphy(phy);
	struct asd_sas_port *port = phy->port;
	struct isci_port *iport;
	unsigned long flags;
	int i;

	/* initial ports are formed as the driver is still initializing,
	 * wait for that process to complete
	 */
	wait_for_start(ihost);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	for (i = 0; i < SCI_MAX_PORTS; i++) {
		iport = &ihost->ports[i];
		if (iport->active_phy_mask & 1 << iphy->phy_index)
			break;
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (i >= SCI_MAX_PORTS)
		iport = NULL;

	port->lldd_port = iport;
}