firefly-linux-kernel-4.4.55.git: drivers/scsi/isci/task.c
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * BSD LICENSE
25  *
26  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27  * All rights reserved.
28  *
29  * Redistribution and use in source and binary forms, with or without
30  * modification, are permitted provided that the following conditions
31  * are met:
32  *
33  *   * Redistributions of source code must retain the above copyright
34  *     notice, this list of conditions and the following disclaimer.
35  *   * Redistributions in binary form must reproduce the above copyright
36  *     notice, this list of conditions and the following disclaimer in
37  *     the documentation and/or other materials provided with the
38  *     distribution.
39  *   * Neither the name of Intel Corporation nor the names of its
40  *     contributors may be used to endorse or promote products derived
41  *     from this software without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54  */
55
56 #include <linux/completion.h>
57 #include <linux/irqflags.h>
58 #include "sas.h"
59 #include <scsi/libsas.h>
60 #include "remote_device.h"
61 #include "remote_node_context.h"
62 #include "isci.h"
63 #include "request.h"
64 #include "task.h"
65 #include "host.h"
66
67 /**
68 * isci_task_refuse() - complete the request to the upper layer driver in
69 *     the case where an I/O needs to be completed back in the submit path.
70 * @ihost: host on which the request was queued
71 * @task: request to complete
72 * @response: response code for the completed task.
73 * @status: status code for the completed task.
74 *
75 */
76 static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
77                              enum service_response response,
78                              enum exec_status status)
79
80 {
81         enum isci_completion_selection disposition;
82
83         disposition = isci_perform_normal_io_completion;
84         disposition = isci_task_set_completion_status(task, response, status,
85                                                       disposition);
86
87         /* Tasks aborted specifically by a call to the lldd_abort_task
88          * function should not be completed to the host in the regular path.
89          */
90         switch (disposition) {
91         case isci_perform_normal_io_completion:
92                 /* Normal notification (task_done) */
93                 dev_dbg(&ihost->pdev->dev,
94                         "%s: Normal - task = %p, response=%d, "
95                         "status=%d\n",
96                         __func__, task, response, status);
97
98                 task->lldd_task = NULL;
99                 task->task_done(task);
100                 break;
101
102         case isci_perform_aborted_io_completion:
103                 /*
104                  * No notification because this request is already in the
105                  * abort path.
106                  */
107                 dev_dbg(&ihost->pdev->dev,
108                         "%s: Aborted - task = %p, response=%d, "
109                         "status=%d\n",
110                         __func__, task, response, status);
111                 break;
112
113         case isci_perform_error_io_completion:
114                 /* Use sas_task_abort */
115                 dev_dbg(&ihost->pdev->dev,
116                         "%s: Error - task = %p, response=%d, "
117                         "status=%d\n",
118                         __func__, task, response, status);
119                 sas_task_abort(task);
120                 break;
121
122         default:
123                 dev_dbg(&ihost->pdev->dev,
124                         "%s: isci task notification default case!",
125                         __func__);
126                 sas_task_abort(task);
127                 break;
128         }
129 }
130
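/* Walk 'num' sas_tasks starting at 'task', advancing through the sas_task
 * list linkage.  libsas can pass lldd_execute_task() a list of tasks together
 * with a count, and this macro visits exactly that many entries.
 */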
131 #define for_each_sas_task(num, task) \
132         for (; num > 0; num--,\
133              task = list_entry(task->list.next, struct sas_task, list))
134
135
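/* A device is ready for I/O when it exists and either has IDEV_IO_READY set,
 * or is in NCQ error recovery and the task itself is an NCQ recovery command
 * (per isci_task_is_ncq_recovery()).
 */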
136 static inline int isci_device_io_ready(struct isci_remote_device *idev,
137                                        struct sas_task *task)
138 {
139         return idev ? test_bit(IDEV_IO_READY, &idev->flags) ||
140                       (test_bit(IDEV_IO_NCQERROR, &idev->flags) &&
141                        isci_task_is_ncq_recovery(task))
142                     : 0;
143 }
144 /**
145  * isci_task_execute_task() - This function is one of the SAS Domain Template
146  *    functions. This function is called by libsas to send a task down to
147  *    hardware.
148  * @task: This parameter specifies the SAS task to send.
149  * @num: This parameter specifies the number of tasks to queue.
150  * @gfp_flags: This parameter specifies the context of this call.
151  *
152  * status, zero indicates success.
153  */
154 int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
155 {
156         struct isci_host *ihost = dev_to_ihost(task->dev);
157         struct isci_remote_device *idev;
158         unsigned long flags;
159         bool io_ready;
160         u16 tag;
161
162         dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
163
164         for_each_sas_task(num, task) {
165                 enum sci_status status = SCI_FAILURE;
166
167                 spin_lock_irqsave(&ihost->scic_lock, flags);
168                 idev = isci_lookup_device(task->dev);
169                 io_ready = isci_device_io_ready(idev, task);
170                 tag = isci_alloc_tag(ihost);
171                 spin_unlock_irqrestore(&ihost->scic_lock, flags);
172
173                 dev_dbg(&ihost->pdev->dev,
174                         "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n",
175                         task, num, task->dev, idev, idev ? idev->flags : 0,
176                         task->uldd_task);
177
178                 if (!idev) {
179                         isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
180                                          SAS_DEVICE_UNKNOWN);
181                 } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
182                         /* Indicate QUEUE_FULL so that the scsi midlayer
183                          * retries.
184                           */
185                         isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
186                                          SAS_QUEUE_FULL);
187                 } else {
188                         /* There is a device and it's ready for I/O. */
189                         spin_lock_irqsave(&task->task_state_lock, flags);
190
191                         if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
192                                 /* The I/O was aborted. */
193                                 spin_unlock_irqrestore(&task->task_state_lock,
194                                                        flags);
195
196                                 isci_task_refuse(ihost, task,
197                                                  SAS_TASK_UNDELIVERED,
198                                                  SAM_STAT_TASK_ABORTED);
199                         } else {
200                                 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
201                                 spin_unlock_irqrestore(&task->task_state_lock, flags);
202
203                                 /* build and send the request. */
204                                 status = isci_request_execute(ihost, idev, task, tag);
205
206                                 if (status != SCI_SUCCESS) {
207
208                                         spin_lock_irqsave(&task->task_state_lock, flags);
209                                         /* Did not really start this command. */
210                                         task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
211                                         spin_unlock_irqrestore(&task->task_state_lock, flags);
212
213                                         if (test_bit(IDEV_GONE, &idev->flags)) {
214
215                                                 /* Indicate that the device
216                                                  * is gone.
217                                                  */
218                                                 isci_task_refuse(ihost, task,
219                                                         SAS_TASK_UNDELIVERED,
220                                                         SAS_DEVICE_UNKNOWN);
221                                         } else {
222                                                 /* Indicate QUEUE_FULL so that
223                                                  * the scsi midlayer retries.
224                                                  * If the request failed for
225                                                  * remote device reasons, it
226                                                  * gets returned as
227                                                  * SAS_TASK_UNDELIVERED next
228                                                  * time through.
229                                                  */
230                                                 isci_task_refuse(ihost, task,
231                                                         SAS_TASK_COMPLETE,
232                                                         SAS_QUEUE_FULL);
233                                         }
234                                 }
235                         }
236                 }
237                 if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
238                         spin_lock_irqsave(&ihost->scic_lock, flags);
239                         /* command never hit the device, so just free
240                          * the tci and skip the sequence increment
241                          */
242                         isci_tci_free(ihost, ISCI_TAG_TCI(tag));
243                         spin_unlock_irqrestore(&ihost->scic_lock, flags);
244                 }
245                 isci_put_device(idev);
246         }
247         return 0;
248 }
249
250 static struct isci_request *isci_task_request_build(struct isci_host *ihost,
251                                                     struct isci_remote_device *idev,
252                                                     u16 tag, struct isci_tmf *isci_tmf)
253 {
254         enum sci_status status = SCI_FAILURE;
255         struct isci_request *ireq = NULL;
256         struct domain_device *dev;
257
258         dev_dbg(&ihost->pdev->dev,
259                 "%s: isci_tmf = %p\n", __func__, isci_tmf);
260
261         dev = idev->domain_dev;
262
263         /* do common allocation and init of request object. */
264         ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag);
265         if (!ireq)
266                 return NULL;
267
268         /* let the core do its construct. */
269         status = sci_task_request_construct(ihost, idev, tag,
270                                              ireq);
271
272         if (status != SCI_SUCCESS) {
273                 dev_warn(&ihost->pdev->dev,
274                          "%s: sci_task_request_construct failed - "
275                          "status = 0x%x\n",
276                          __func__,
277                          status);
278                 return NULL;
279         }
280
281         /* XXX convert to get this from task->tproto like other drivers */
282         if (dev->dev_type == SAS_END_DEV) {
283                 isci_tmf->proto = SAS_PROTOCOL_SSP;
284                 status = sci_task_request_construct_ssp(ireq);
285                 if (status != SCI_SUCCESS)
286                         return NULL;
287         }
288
289         return ireq;
290 }
291
292 /**
293 * isci_request_mark_zombie() - This function must be called with scic_lock held.
294 */
295 static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq)
296 {
297         struct completion *tmf_completion = NULL;
298         struct completion *req_completion;
299
300         /* Set the request state to "dead". */
301         ireq->status = dead;
302
303         req_completion = ireq->io_request_completion;
304         ireq->io_request_completion = NULL;
305
306         if (test_bit(IREQ_TMF, &ireq->flags)) {
307                 /* Break links with the TMF request. */
308                 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
309
310                 /* In the case where a task request is dying,
311                  * the thread waiting on the completion will sit and
312                  * time out unless we wake it now.  Since the TMF
313                  * has a default error status, complete it here
314                  * to wake the waiting thread.
315                  */
316                 if (tmf) {
317                         tmf_completion = tmf->complete;
318                         tmf->complete = NULL;
319                         dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n",
320                                 __func__, tmf->tmf_code, tmf->io_tag);
321                 }
322                 ireq->ttype_ptr.tmf_task_ptr = NULL;
323         } else {
324                 /* Break links with the sas_task - the callback is done
325                  * elsewhere.
326                  */
327                 struct sas_task *task = isci_request_access_task(ireq);
328
329                 if (task)
330                         task->lldd_task = NULL;
331
332                 ireq->ttype_ptr.io_task_ptr = NULL;
333         }
334
335         dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n",
336                  ireq->io_tag);
337
338         /* Don't force waiting threads to timeout. */
339         if (req_completion)
340                 complete(req_completion);
341
342         if (tmf_completion != NULL)
343                 complete(tmf_completion);
344 }
345
346 static int isci_task_execute_tmf(struct isci_host *ihost,
347                                  struct isci_remote_device *idev,
348                                  struct isci_tmf *tmf, unsigned long timeout_ms)
349 {
350         DECLARE_COMPLETION_ONSTACK(completion);
351         enum sci_task_status status = SCI_TASK_FAILURE;
352         struct isci_request *ireq;
353         int ret = TMF_RESP_FUNC_FAILED;
354         unsigned long flags;
355         unsigned long timeleft;
356         u16 tag;
357
358         spin_lock_irqsave(&ihost->scic_lock, flags);
359         tag = isci_alloc_tag(ihost);
360         spin_unlock_irqrestore(&ihost->scic_lock, flags);
361
362         if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
363                 return ret;
364
365         /* sanity check, return TMF_RESP_FUNC_FAILED
366          * if the device is not there and ready.
367          */
368         if (!idev ||
369             (!test_bit(IDEV_IO_READY, &idev->flags) &&
370              !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
371                 dev_dbg(&ihost->pdev->dev,
372                         "%s: idev = %p not ready (%#lx)\n",
373                         __func__,
374                         idev, idev ? idev->flags : 0);
375                 goto err_tci;
376         } else
377                 dev_dbg(&ihost->pdev->dev,
378                         "%s: idev = %p\n",
379                         __func__, idev);
380
381         /* Assign the pointer to the TMF's completion kernel wait structure. */
382         tmf->complete = &completion;
383         tmf->status = SCI_FAILURE_TIMEOUT;
384
385         ireq = isci_task_request_build(ihost, idev, tag, tmf);
386         if (!ireq)
387                 goto err_tci;
388
389         spin_lock_irqsave(&ihost->scic_lock, flags);
390
391         /* start the TMF io. */
392         status = sci_controller_start_task(ihost, idev, ireq);
393
394         if (status != SCI_TASK_SUCCESS) {
395                 dev_dbg(&ihost->pdev->dev,
396                          "%s: start_io failed - status = 0x%x, request = %p\n",
397                          __func__,
398                          status,
399                          ireq);
400                 spin_unlock_irqrestore(&ihost->scic_lock, flags);
401                 goto err_tci;
402         }
403
404         if (tmf->cb_state_func != NULL)
405                 tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
406
407         isci_request_change_state(ireq, started);
408
409         /* add the request to the remote device request list. */
410         list_add(&ireq->dev_node, &idev->reqs_in_process);
411
412         spin_unlock_irqrestore(&ihost->scic_lock, flags);
413
414         /* Wait for the TMF to complete, or a timeout. */
415         timeleft = wait_for_completion_timeout(&completion,
416                                                msecs_to_jiffies(timeout_ms));
417
418         if (timeleft == 0) {
419                 /* The TMF did not complete - this could be because
420                  * of an unplug.  Terminate the TMF request now.
421                  */
422                 spin_lock_irqsave(&ihost->scic_lock, flags);
423
424                 if (tmf->cb_state_func != NULL)
425                         tmf->cb_state_func(isci_tmf_timed_out, tmf,
426                                            tmf->cb_data);
427
428                 sci_controller_terminate_request(ihost, idev, ireq);
429
430                 spin_unlock_irqrestore(&ihost->scic_lock, flags);
431
432                 timeleft = wait_for_completion_timeout(
433                         &completion,
434                         msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
435
436                 if (!timeleft) {
437                         /* Strange condition - the termination of the TMF
438                          * request timed-out.
439                          */
440                         spin_lock_irqsave(&ihost->scic_lock, flags);
441
442                         /* If the TMF status has not changed, kill it. */
443                         if (tmf->status == SCI_FAILURE_TIMEOUT)
444                                 isci_request_mark_zombie(ihost, ireq);
445
446                         spin_unlock_irqrestore(&ihost->scic_lock, flags);
447                 }
448         }
449
450         isci_print_tmf(ihost, tmf);
451
452         if (tmf->status == SCI_SUCCESS)
453                 ret =  TMF_RESP_FUNC_COMPLETE;
454         else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
455                 dev_dbg(&ihost->pdev->dev,
456                         "%s: tmf.status == "
457                         "SCI_FAILURE_IO_RESPONSE_VALID\n",
458                         __func__);
459                 ret =  TMF_RESP_FUNC_COMPLETE;
460         }
461         /* Else - leave the default "failed" status alone. */
462
463         dev_dbg(&ihost->pdev->dev,
464                 "%s: completed request = %p\n",
465                 __func__,
466                 ireq);
467
468         return ret;
469
470  err_tci:
471         spin_lock_irqsave(&ihost->scic_lock, flags);
472         isci_tci_free(ihost, ISCI_TAG_TCI(tag));
473         spin_unlock_irqrestore(&ihost->scic_lock, flags);
474
475         return ret;
476 }
477
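/* Initialize a TMF descriptor: clear the structure and record the TMF
 * function code plus the optional state-change callback and its context.
 */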
478 static void isci_task_build_tmf(struct isci_tmf *tmf,
479                                 enum isci_tmf_function_codes code,
480                                 void (*tmf_sent_cb)(enum isci_tmf_cb_state,
481                                                     struct isci_tmf *,
482                                                     void *),
483                                 void *cb_data)
484 {
485         memset(tmf, 0, sizeof(*tmf));
486
487         tmf->tmf_code      = code;
488         tmf->cb_state_func = tmf_sent_cb;
489         tmf->cb_data       = cb_data;
490 }
491
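/* Build an abort-task TMF.  The request being aborted is passed as the
 * callback context (cb_data), and its I/O tag is recorded so the TMF can
 * reference the outstanding command it is aborting.
 */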
492 static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
493                                            enum isci_tmf_function_codes code,
494                                            void (*tmf_sent_cb)(enum isci_tmf_cb_state,
495                                                                struct isci_tmf *,
496                                                                void *),
497                                            struct isci_request *old_request)
498 {
499         isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request);
500         tmf->io_tag = old_request->io_tag;
501 }
502
503 /**
504  * isci_task_validate_request_to_abort() - This function checks the given I/O
505  *    against the "started" state.  If the request is still "started", its
506  *    state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
507  *    BEFORE CALLING THIS FUNCTION.
508  * @isci_request: This parameter specifies the request object to control.
509  * @isci_host: This parameter specifies the ISCI host object
510  * @isci_device: This is the device to which the request is pending.
511  * @aborted_io_completion: This is a completion structure that will be added to
512  *    the request in case it is changed to aborting; this completion is
513  *    triggered when the request is fully completed.
514  *
515  * Either "started" on successful change of the task status to "aborted", or
516  * "unallocated" if the task cannot be controlled.
517  */
518 static enum isci_request_status isci_task_validate_request_to_abort(
519         struct isci_request *isci_request,
520         struct isci_host *isci_host,
521         struct isci_remote_device *isci_device,
522         struct completion *aborted_io_completion)
523 {
524         enum isci_request_status old_state = unallocated;
525
526         /* Only abort the task if it's in the
527          *  device's reqs_in_process list
528          */
529         if (isci_request && !list_empty(&isci_request->dev_node)) {
530                 old_state = isci_request_change_started_to_aborted(
531                         isci_request, aborted_io_completion);
532
533         }
534
535         return old_state;
536 }
537
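/* Request states whose final deallocation (tag free, list removal) is handled
 * by the terminate/abort path rather than by normal I/O completion.
 */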
538 static int isci_request_is_dealloc_managed(enum isci_request_status stat)
539 {
540         switch (stat) {
541         case aborted:
542         case aborting:
543         case terminating:
544         case completed:
545         case dead:
546                 return true;
547         default:
548                 return false;
549         }
550 }
551
552 /**
553  * isci_terminate_request_core() - This function will terminate the given
554  *    request, and wait for it to complete.  This function must only be called
555  *    from a thread that can wait.  Note that the request is terminated and
556  *    completed (back to the host, if started there).
557  * @ihost: This SCU.
558  * @idev: The target.
559  * @isci_request: The I/O request to be terminated.
560  *
561  */
562 static void isci_terminate_request_core(struct isci_host *ihost,
563                                         struct isci_remote_device *idev,
564                                         struct isci_request *isci_request)
565 {
566         enum sci_status status      = SCI_SUCCESS;
567         bool was_terminated         = false;
568         bool needs_cleanup_handling = false;
569         unsigned long     flags;
570         unsigned long     termination_completed = 1;
571         struct completion *io_request_completion;
572
573         dev_dbg(&ihost->pdev->dev,
574                 "%s: device = %p; request = %p\n",
575                 __func__, idev, isci_request);
576
577         spin_lock_irqsave(&ihost->scic_lock, flags);
578
579         io_request_completion = isci_request->io_request_completion;
580
581         /* Note that we are not going to control
582          * the target to abort the request.
583          */
584         set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);
585
586         /* Make sure the request wasn't just sitting around signalling
587          * a device condition (if the request handle is NULL, then the
588          * request completed but needed additional handling here).
589          */
590         if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
591                 was_terminated = true;
592                 needs_cleanup_handling = true;
593                 status = sci_controller_terminate_request(ihost,
594                                                            idev,
595                                                            isci_request);
596         }
597         spin_unlock_irqrestore(&ihost->scic_lock, flags);
598
599         /*
600          * The only time the request to terminate will
601          * fail is when the io request is completed and
602          * being aborted.
603          */
604         if (status != SCI_SUCCESS) {
605                 dev_dbg(&ihost->pdev->dev,
606                         "%s: sci_controller_terminate_request"
607                         " returned = 0x%x\n",
608                         __func__, status);
609
610                 isci_request->io_request_completion = NULL;
611
612         } else {
613                 if (was_terminated) {
614                         dev_dbg(&ihost->pdev->dev,
615                                 "%s: before completion wait (%p/%p)\n",
616                                 __func__, isci_request, io_request_completion);
617
618                         /* Wait here for the request to complete. */
619                         termination_completed
620                                 = wait_for_completion_timeout(
621                                    io_request_completion,
622                                    msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
623
624                         if (!termination_completed) {
625
626                                 /* The request to terminate has timed out.  */
627                                 spin_lock_irqsave(&ihost->scic_lock, flags);
628
629                                 /* Check for state changes. */
630                                 if (!test_bit(IREQ_TERMINATED,
631                                               &isci_request->flags)) {
632
633                                         /* The best we can do is to have the
634                                          * request die a silent death if it
635                                          * ever really completes.
636                                          */
637                                         isci_request_mark_zombie(ihost,
638                                                                  isci_request);
639                                         needs_cleanup_handling = true;
640                                 } else
641                                         termination_completed = 1;
642
643                                 spin_unlock_irqrestore(&ihost->scic_lock,
644                                                        flags);
645
646                                 if (!termination_completed) {
647
648                                         dev_dbg(&ihost->pdev->dev,
649                                                 "%s: *** Timeout waiting for "
650                                                 "termination(%p/%p)\n",
651                                                 __func__, io_request_completion,
652                                                 isci_request);
653
654                                         /* The request can no longer be referenced
655                                          * safely since it may go away if the
656                                          * termination ever really does complete.
657                                          */
658                                         isci_request = NULL;
659                                 }
660                         }
661                         if (termination_completed)
662                                 dev_dbg(&ihost->pdev->dev,
663                                         "%s: after completion wait (%p/%p)\n",
664                                         __func__, isci_request, io_request_completion);
665                 }
666
667                 if (termination_completed) {
668
669                         isci_request->io_request_completion = NULL;
670
671                         /* Peek at the status of the request.  This will tell
672                          * us if there was special handling on the request such that it
673                          * needs to be detached and freed here.
674                          */
675                         spin_lock_irqsave(&isci_request->state_lock, flags);
676
677                         needs_cleanup_handling
678                                 = isci_request_is_dealloc_managed(
679                                         isci_request->status);
680
681                         spin_unlock_irqrestore(&isci_request->state_lock, flags);
682
683                 }
684                 if (needs_cleanup_handling) {
685
686                         dev_dbg(&ihost->pdev->dev,
687                                 "%s: cleanup isci_device=%p, request=%p\n",
688                                 __func__, idev, isci_request);
689
690                         if (isci_request != NULL) {
691                                 spin_lock_irqsave(&ihost->scic_lock, flags);
692                                 isci_free_tag(ihost, isci_request->io_tag);
693                                 isci_request_change_state(isci_request, unallocated);
694                                 list_del_init(&isci_request->dev_node);
695                                 spin_unlock_irqrestore(&ihost->scic_lock, flags);
696                         }
697                 }
698         }
699 }
700
701 /**
702  * isci_terminate_pending_requests() - This function will change the state of all
703  *    requests on the given device to "terminating", will terminate the
704  *    requests, and wait for them to complete.  This function must only be
705  *    called from a thread that can wait.  Note that the requests are all
706  *    terminated and completed (back to the host, if started there).
707  * @ihost: This parameter specifies the SCU.
708  * @idev: This parameter specifies the target.
709  *
710  */
711 void isci_terminate_pending_requests(struct isci_host *ihost,
712                                      struct isci_remote_device *idev)
713 {
714         struct completion request_completion;
715         enum isci_request_status old_state;
716         unsigned long flags;
717         LIST_HEAD(list);
718
719         isci_remote_device_suspend(ihost, idev);
720
721         spin_lock_irqsave(&ihost->scic_lock, flags);
722         list_splice_init(&idev->reqs_in_process, &list);
723
724         /* assumes that isci_terminate_request_core deletes from the list */
725         while (!list_empty(&list)) {
726                 struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);
727
728                 /* Change state to "terminating" if it is currently
729                  * "started".
730                  */
731                 old_state = isci_request_change_started_to_newstate(ireq,
732                                                                     &request_completion,
733                                                                     terminating);
734                 switch (old_state) {
735                 case started:
736                 case completed:
737                 case aborting:
738                         break;
739                 default:
740                         /* termination in progress, or otherwise dispositioned.
741                          * We know the request was on 'list' so should be safe
742                          * to move it back to reqs_in_process
743                          */
744                         list_move(&ireq->dev_node, &idev->reqs_in_process);
745                         ireq = NULL;
746                         break;
747                 }
748
749                 if (!ireq)
750                         continue;
751                 spin_unlock_irqrestore(&ihost->scic_lock, flags);
752
753                 init_completion(&request_completion);
754
755                 dev_dbg(&ihost->pdev->dev,
756                          "%s: idev=%p request=%p; task=%p old_state=%d\n",
757                          __func__, idev, ireq,
758                         (!test_bit(IREQ_TMF, &ireq->flags)
759                                 ? isci_request_access_task(ireq)
760                                 : NULL),
761                         old_state);
762
763                 /* If the old_state is started:
764                  * This request was not already being aborted. If it had been,
765                  * then the aborting I/O (i.e. the TMF request) would not be in
766                  * the aborting state, and thus would be terminated here.  Note
767                  * that since the TMF completion's call to the kernel function
768                  * "complete()" does not happen until the pending I/O request
769                  * terminate fully completes, we do not have to implement a
770                  * special wait here for already aborting requests - the
771                  * termination of the TMF request will force the request
772                  * to finish its already started terminate.
773                  *
774                  * If old_state == completed:
775                  * This request completed from the SCU hardware perspective
776                  * and now just needs cleaning up in terms of freeing the
777                  * request and potentially calling up to libsas.
778                  *
779                  * If old_state == aborting:
780                  * This request has already gone through a TMF timeout, but may
781                  * not have been terminated; needs cleaning up at least.
782                  */
783                 isci_terminate_request_core(ihost, idev, ireq);
784                 spin_lock_irqsave(&ihost->scic_lock, flags);
785         }
786         spin_unlock_irqrestore(&ihost->scic_lock, flags);
787 }
788
789 /**
790  * isci_task_send_lu_reset_sas() - This function is called by one of the SAS
791  *    Domain Template functions to send a LUN reset to a SAS device.
792  * @lun: This parameter specifies the lun to be reset.
793  *
794  * status, zero indicates success.
795  */
796 static int isci_task_send_lu_reset_sas(
797         struct isci_host *isci_host,
798         struct isci_remote_device *isci_device,
799         u8 *lun)
800 {
801         struct isci_tmf tmf;
802         int ret = TMF_RESP_FUNC_FAILED;
803
804         dev_dbg(&isci_host->pdev->dev,
805                 "%s: isci_host = %p, isci_device = %p\n",
806                 __func__, isci_host, isci_device);
807         /* Send the LUN reset to the target.  By the time the call returns,
808          * the TMF has fully executed in the target (in which case the return
809          * value is "TMF_RESP_FUNC_COMPLETE"), or the request timed out or
810          * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
811          */
812         isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
813
814         #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
815         ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
816
817         if (ret == TMF_RESP_FUNC_COMPLETE)
818                 dev_dbg(&isci_host->pdev->dev,
819                         "%s: %p: TMF_LU_RESET passed\n",
820                         __func__, isci_device);
821         else
822                 dev_dbg(&isci_host->pdev->dev,
823                         "%s: %p: TMF_LU_RESET failed (%x)\n",
824                         __func__, isci_device, ret);
825
826         return ret;
827 }
828
829 int isci_task_lu_reset(struct domain_device *dev, u8 *lun)
830 {
831         struct isci_host *ihost = dev_to_ihost(dev);
832         struct isci_remote_device *isci_device;
833         unsigned long flags;
834         int ret;
835
836         spin_lock_irqsave(&ihost->scic_lock, flags);
837         isci_device = isci_lookup_device(dev);
838         spin_unlock_irqrestore(&ihost->scic_lock, flags);
839
840         dev_dbg(&ihost->pdev->dev,
841                 "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
842                 __func__, dev, ihost, isci_device);
843
844         if (!isci_device) {
845                 /* If the device is gone, stop the escalations. */
846                 dev_dbg(&ihost->pdev->dev, "%s: No dev\n", __func__);
847
848                 ret = TMF_RESP_FUNC_COMPLETE;
849                 goto out;
850         }
851         if (isci_remote_device_suspend(ihost, isci_device) != SCI_SUCCESS) {
852                 dev_dbg(&ihost->pdev->dev,
853                         "%s:  device = %p; failed to suspend\n",
854                         __func__, isci_device);
855                 ret = TMF_RESP_FUNC_FAILED;
856                 goto out;
857         }
858
859         /* Send the task management part of the reset. */
860         if (dev_is_sata(dev)) {
861                 sas_ata_schedule_reset(dev);
862                 ret = TMF_RESP_FUNC_COMPLETE;
863         } else
864                 ret = isci_task_send_lu_reset_sas(ihost, isci_device, lun);
865
866         /* If the LUN reset worked, all the I/O can now be terminated. */
867         if (ret == TMF_RESP_FUNC_COMPLETE) {
868                 /* Terminate all I/O now. */
869                 isci_terminate_pending_requests(ihost, isci_device);
870                 isci_remote_device_resume(ihost, isci_device, NULL, NULL);
871         }
872  out:
873         isci_put_device(isci_device);
874         return ret;
875 }
876
877
878 /*       int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
879 int isci_task_clear_nexus_port(struct asd_sas_port *port)
880 {
881         return TMF_RESP_FUNC_FAILED;
882 }
883
884
885
886 int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
887 {
888         return TMF_RESP_FUNC_FAILED;
889 }
890
891 /* Task Management Functions. Must be called from process context.       */
892
893 /**
894  * isci_abort_task_process_cb() - This is a helper function for the abort task
895  *    TMF command.  It manages the request state with respect to the successful
896  *    transmission / completion of the abort task request.
897  * @cb_state: This parameter specifies when this function was called - after
898  *    the TMF request has been started or after it has timed out.
899  * @tmf: This parameter specifies the TMF in progress.
900  * @cb_data: This parameter is the isci_request being aborted.
901  *
902  */
903 static void isci_abort_task_process_cb(
904         enum isci_tmf_cb_state cb_state,
905         struct isci_tmf *tmf,
906         void *cb_data)
907 {
908         struct isci_request *old_request;
909
910         old_request = (struct isci_request *)cb_data;
911
912         dev_dbg(&old_request->isci_host->pdev->dev,
913                 "%s: tmf=%p, old_request=%p\n",
914                 __func__, tmf, old_request);
915
916         switch (cb_state) {
917
918         case isci_tmf_started:
919                 /* The TMF has been started.  Nothing to do here, since the
920                  * request state was already set to "aborted" by the abort
921                  * task function.
922                  */
923                 if ((old_request->status != aborted)
924                         && (old_request->status != completed))
925                         dev_dbg(&old_request->isci_host->pdev->dev,
926                                 "%s: Bad request status (%d): tmf=%p, old_request=%p\n",
927                                 __func__, old_request->status, tmf, old_request);
928                 break;
929
930         case isci_tmf_timed_out:
931
932                 /* Set the task's state to "aborting", since the abort task
933                  * function thread set it to "aborted" (above) in anticipation
934                  * of the task management request working correctly.  Since the
935                  * timeout has now fired, the TMF request failed.  We set the
936                  * state such that the request completion will indicate the
937                  * device is no longer present.
938                  */
939                 isci_request_change_state(old_request, aborting);
940                 break;
941
942         default:
943                 dev_dbg(&old_request->isci_host->pdev->dev,
944                         "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
945                         __func__, cb_state, tmf, old_request);
946                 break;
947         }
948 }
949
950 /**
951  * isci_task_abort_task() - This function is one of the SAS Domain Template
952  *    functions. This function is called by libsas to abort a specified task.
953  * @task: This parameter specifies the SAS task to abort.
954  *
955  * status, zero indicates success.
956  */
957 int isci_task_abort_task(struct sas_task *task)
958 {
959         struct isci_host *isci_host = dev_to_ihost(task->dev);
960         DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
961         struct isci_request       *old_request = NULL;
962         enum isci_request_status  old_state;
963         struct isci_remote_device *isci_device = NULL;
964         struct isci_tmf           tmf;
965         int                       ret = TMF_RESP_FUNC_FAILED;
966         unsigned long             flags;
967         int                       perform_termination = 0;
968
969         /* Get the isci_request reference from the task.  Note that
970          * this check does not depend on the pending request list
971          * in the device, because tasks driving resets may land here
972          * after completion in the core.
973          */
974         spin_lock_irqsave(&isci_host->scic_lock, flags);
975         spin_lock(&task->task_state_lock);
976
977         old_request = task->lldd_task;
978
979         /* If task is already done, the request isn't valid */
980         if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
981             (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
982             old_request)
983                 isci_device = isci_lookup_device(task->dev);
984
985         spin_unlock(&task->task_state_lock);
986         spin_unlock_irqrestore(&isci_host->scic_lock, flags);
987
988         dev_warn(&isci_host->pdev->dev,
989                 "%s: dev = %p, task = %p, old_request == %p\n",
990                 __func__, isci_device, task, old_request);
991
992         /* Device reset conditions signalled in task_state_flags are the
993          * responsibility of libsas to observe at the start of the error
994          * handler thread.
995          */
996         if (!isci_device || !old_request) {
997                 /* The request has already completed and there
998                 * is nothing to do here other than to set the task
999                 * done bit, and indicate that the task abort function
1000                 * was successful.
1001                 */
1002                 spin_lock_irqsave(&task->task_state_lock, flags);
1003                 task->task_state_flags |= SAS_TASK_STATE_DONE;
1004                 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
1005                                             SAS_TASK_STATE_PENDING);
1006                 spin_unlock_irqrestore(&task->task_state_lock, flags);
1007
1008                 ret = TMF_RESP_FUNC_COMPLETE;
1009
1010                 dev_warn(&isci_host->pdev->dev,
1011                         "%s: abort task not needed for %p\n",
1012                         __func__, task);
1013                 goto out;
1014         }
1015
1016         spin_lock_irqsave(&isci_host->scic_lock, flags);
1017
1018         /* Check the request status and change to "aborted" if currently
1019          * "started"; if so, set the I/O kernel completion
1020          * struct that will be triggered when the request completes.
1021          */
1022         old_state = isci_task_validate_request_to_abort(
1023                                 old_request, isci_host, isci_device,
1024                                 &aborted_io_completion);
1025         if ((old_state != started) &&
1026             (old_state != completed) &&
1027             (old_state != aborting)) {
1028
1029                 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1030
1031                 /* The request was already being handled by someone else (because
1032                 * they got to set the state away from started).
1033                 */
1034                 dev_warn(&isci_host->pdev->dev,
1035                         "%s:  device = %p; old_request %p already being aborted\n",
1036                         __func__,
1037                         isci_device, old_request);
1038                 ret = TMF_RESP_FUNC_COMPLETE;
1039                 goto out;
1040         }
1041         if (task->task_proto == SAS_PROTOCOL_SMP ||
1042             sas_protocol_ata(task->task_proto) ||
1043             test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
1044
1045                 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1046
1047                 dev_warn(&isci_host->pdev->dev,
1048                         "%s: %s request"
1049                         " or complete_in_target (%d), thus no TMF\n",
1050                         __func__,
1051                         ((task->task_proto == SAS_PROTOCOL_SMP)
1052                                 ? "SMP"
1053                                 : (sas_protocol_ata(task->task_proto)
1054                                         ? "SATA/STP"
1055                                         : "<other>")
1056                          ),
1057                         test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
1058
1059                 if (test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
1060                         spin_lock_irqsave(&task->task_state_lock, flags);
1061                         task->task_state_flags |= SAS_TASK_STATE_DONE;
1062                         task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
1063                                                     SAS_TASK_STATE_PENDING);
1064                         spin_unlock_irqrestore(&task->task_state_lock, flags);
1065                         ret = TMF_RESP_FUNC_COMPLETE;
1066                 } else {
1067                         spin_lock_irqsave(&task->task_state_lock, flags);
1068                         task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
1069                                                     SAS_TASK_STATE_PENDING);
1070                         spin_unlock_irqrestore(&task->task_state_lock, flags);
1071                 }
1072
1073                 /* STP and SMP devices are not sent a TMF, but the
1074                  * outstanding I/O request is terminated below.  This is
1075                  * because SATA/STP and SMP discovery path timeouts directly
1076                  * call the abort task interface for cleanup.
1077                  */
1078                 perform_termination = 1;
1079
1080                 if (isci_device && !test_bit(IDEV_GONE, &isci_device->flags) &&
1081                     (isci_remote_device_suspend(isci_host, isci_device)
1082                      != SCI_SUCCESS)) {
1083                         dev_warn(&isci_host->pdev->dev,
1084                                 "%s:  device = %p; failed to suspend\n",
1085                                 __func__, isci_device);
1086                         goto out;
1087                 }
1088
1089         } else {
1090                 /* Fill in the tmf structure */
1091                 isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
1092                                                isci_abort_task_process_cb,
1093                                                old_request);
1094
1095                 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1096
1097                 if (isci_remote_device_suspend(isci_host, isci_device)
1098                     != SCI_SUCCESS) {
1099                         dev_warn(&isci_host->pdev->dev,
1100                                 "%s:  device = %p; failed to suspend\n",
1101                                 __func__, isci_device);
1102                         goto out;
1103                 }
1104
1105                 #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */
1106                 ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
1107                                             ISCI_ABORT_TASK_TIMEOUT_MS);
1108
1109                 if (ret == TMF_RESP_FUNC_COMPLETE)
1110                         perform_termination = 1;
1111                 else
1112                         dev_warn(&isci_host->pdev->dev,
1113                                 "%s: isci_task_execute_tmf failed\n", __func__);
1114         }
1115         if (perform_termination) {
1116                 set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
1117
1118                 /* Clean up the request on our side, and wait for the aborted
1119                  * I/O to complete.
1120                  */
1121                 isci_terminate_request_core(isci_host, isci_device,
1122                                             old_request);
1123                 isci_remote_device_resume(isci_host, isci_device, NULL, NULL);
1124         }
1125
1126         /* Make sure we do not leave a reference to aborted_io_completion */
1127         old_request->io_request_completion = NULL;
1128  out:
1129         isci_put_device(isci_device);
1130         return ret;
1131 }
1132
1133 /**
1134  * isci_task_abort_task_set() - This function is one of the SAS Domain Template
1135  *    functions. This is one of the Task Management functions called by libsas
1136  *    to abort all tasks for the given lun.
1137  * @d_device: This parameter specifies the domain device associated with this
1138  *    request.
1139  * @lun: This parameter specifies the lun associated with this request.
1140  *
1141  * status, zero indicates success.
1142  */
1143 int isci_task_abort_task_set(
1144         struct domain_device *d_device,
1145         u8 *lun)
1146 {
1147         return TMF_RESP_FUNC_FAILED;
1148 }
1149
1150
1151 /**
1152  * isci_task_clear_aca() - This function is one of the SAS Domain Template
1153  *    functions. This is one of the Task Management functions called by libsas.
1154  * @d_device: This parameter specifies the domain device associated with this
1155  *    request.
1156  * @lun: This parameter specifies the lun associated with this request.
1157  *
1158  * status, zero indicates success.
1159  */
1160 int isci_task_clear_aca(
1161         struct domain_device *d_device,
1162         u8 *lun)
1163 {
1164         return TMF_RESP_FUNC_FAILED;
1165 }
1166
1167
1168
1169 /**
1170  * isci_task_clear_task_set() - This function is one of the SAS Domain Template
1171  *    functions. This is one of the Task Management functions called by libsas.
1172  * @d_device: This parameter specifies the domain device associated with this
1173  *    request.
1174  * @lun: This parameter specifies the lun associated with this request.
1175  *
1176  * status, zero indicates success.
1177  */
1178 int isci_task_clear_task_set(
1179         struct domain_device *d_device,
1180         u8 *lun)
1181 {
1182         return TMF_RESP_FUNC_FAILED;
1183 }
1184
1185
1186 /**
1187  * isci_task_query_task() - This function is implemented to cause libsas to
1188  *    correctly escalate the failed abort to a LUN or target reset (this is
1189  *    because the libsas function sas_scsi_find_task does not correctly interpret
1190  *    all return codes from the abort task call).  When TMF_RESP_FUNC_SUCC is
1191  *    returned, libsas turns this into a LUN reset; when TMF_RESP_FUNC_FAILED is
1192  *    returned, libsas will turn this into a target reset.
1193  * @task: This parameter specifies the sas task being queried.
1194  * @lun: This parameter specifies the lun associated with this request.
1195  *
1196  * status, zero indicates success.
1197  */
1198 int isci_task_query_task(
1199         struct sas_task *task)
1200 {
1201         /* See if there is a pending device reset for this device. */
1202         if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
1203                 return TMF_RESP_FUNC_FAILED;
1204         else
1205                 return TMF_RESP_FUNC_SUCC;
1206 }
1207
1208 /*
1209  * isci_task_request_complete() - This function is called by the sci core when
1210  *    a task request completes.
1211  * @ihost: This parameter specifies the ISCI host object
1212  * @ireq: This parameter is the completed isci_request object.
1213  * @completion_status: This parameter specifies the completion status from the
1214  *    sci core.
1215  *
1216  * none.
1217  */
1218 void
1219 isci_task_request_complete(struct isci_host *ihost,
1220                            struct isci_request *ireq,
1221                            enum sci_task_status completion_status)
1222 {
1223         struct isci_tmf *tmf = isci_request_access_tmf(ireq);
1224         struct completion *tmf_complete = NULL;
1225         struct completion *request_complete = ireq->io_request_completion;
1226
1227         dev_dbg(&ihost->pdev->dev,
1228                 "%s: request = %p, status=%d\n",
1229                 __func__, ireq, completion_status);
1230
1231         isci_request_change_state(ireq, completed);
1232
1233         set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
1234
1235         if (tmf) {
1236                 tmf->status = completion_status;
1237
1238                 if (tmf->proto == SAS_PROTOCOL_SSP) {
1239                         memcpy(&tmf->resp.resp_iu,
1240                                &ireq->ssp.rsp,
1241                                SSP_RESP_IU_MAX_SIZE);
1242                 } else if (tmf->proto == SAS_PROTOCOL_SATA) {
1243                         memcpy(&tmf->resp.d2h_fis,
1244                                &ireq->stp.rsp,
1245                                sizeof(struct dev_to_host_fis));
1246                 }
1247                 /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
1248                 tmf_complete = tmf->complete;
1249         }
1250         sci_controller_complete_io(ihost, ireq->target_device, ireq);
1251         /* Set the 'terminated' flag to make sure the request cannot be terminated
1252          * or completed again.
1253          */
1254         set_bit(IREQ_TERMINATED, &ireq->flags);
1255
1256         /* As soon as something is in the terminate path, deallocation is
1257          * managed there.  Note that the final non-managed state of a task
1258          * request is "completed".
1259          */
1260         if ((ireq->status == completed) ||
1261             !isci_request_is_dealloc_managed(ireq->status)) {
1262                 isci_request_change_state(ireq, unallocated);
1263                 isci_free_tag(ihost, ireq->io_tag);
1264                 list_del_init(&ireq->dev_node);
1265         }
1266
1267         /* "request_complete" is set if the task was being terminated. */
1268         if (request_complete)
1269                 complete(request_complete);
1270
1271         /* The task management part completes last. */
1272         if (tmf_complete)
1273                 complete(tmf_complete);
1274 }
1275
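/* Reset the I_T nexus for 'dev': start the remote device reset, hard-reset
 * the local phy for direct-attached devices (or reset the attached phy through
 * the expander otherwise), terminate in-flight requests, and then resume the
 * remote node context via isci_remote_device_reset_complete().
 */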
1276 static int isci_reset_device(struct isci_host *ihost,
1277                              struct domain_device *dev,
1278                              struct isci_remote_device *idev)
1279 {
1280         int rc;
1281         enum sci_status status;
1282         struct sas_phy *phy = sas_get_local_phy(dev);
1283         struct isci_port *iport = dev->port->lldd_port;
1284
1285         dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
1286
1287         if (isci_remote_device_reset(ihost, idev) != SCI_SUCCESS) {
1288                 rc = TMF_RESP_FUNC_FAILED;
1289                 goto out;
1290         }
1291
1292         if (scsi_is_sas_phy_local(phy)) {
1293                 struct isci_phy *iphy = &ihost->phys[phy->number];
1294
1295                 rc = isci_port_perform_hard_reset(ihost, iport, iphy);
1296         } else
1297                 rc = sas_phy_reset(phy, !dev_is_sata(dev));
1298
1299         /* Terminate in-progress I/O now. */
1300         isci_remote_device_nuke_requests(ihost, idev);
1301
1302         /* Since all pending TCs have been cleaned, resume the RNC. */
1303         status = isci_remote_device_reset_complete(ihost, idev);
1304
1305         if (status != SCI_SUCCESS)
1306                 dev_dbg(&ihost->pdev->dev,
1307                          "%s: isci_remote_device_reset_complete(%p) "
1308                          "returned %d!\n", __func__, idev, status);
1309
1310         dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
1311  out:
1312         sas_put_local_phy(phy);
1313         return rc;
1314 }
1315
1316 int isci_task_I_T_nexus_reset(struct domain_device *dev)
1317 {
1318         struct isci_host *ihost = dev_to_ihost(dev);
1319         struct isci_remote_device *idev;
1320         unsigned long flags;
1321         int ret;
1322
1323         spin_lock_irqsave(&ihost->scic_lock, flags);
1324         idev = isci_get_device(dev);
1325         spin_unlock_irqrestore(&ihost->scic_lock, flags);
1326
1327         if (!idev) {
1328                 /* XXX: need to cleanup any ireqs targeting this
1329                  * domain_device
1330                  */
1331                 ret = TMF_RESP_FUNC_COMPLETE;
1332                 goto out;
1333         }
1334
1335         ret = isci_reset_device(ihost, dev, idev);
1336  out:
1337         isci_put_device(idev);
1338         return ret;
1339 }