/*
* Copyright (C) 1999 Eric Youngdale
* Copyright (C) 2014 Christoph Hellwig
*
* SCSI queueing library.
* Initial versions: Eric Youngdale ([email protected]).
* Based upon conversations with large numbers
* of people at Linux Expo.
*/
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_dh.h>
#include <trace/events/scsi.h>
#include "scsi_priv.h"
#include "scsi_logging.h"
#define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE 2
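/*
* Scatterlists are drawn from a small set of mempool-backed slab caches,
* one per power-of-two segment count (see scsi_sg_pools below).
* SG_MEMPOOL_SIZE is the reserve used when the mempools are created
* (elsewhere in this file), so allocation can make progress even under
* memory pressure.
*/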
struct scsi_host_sg_pool {
size_t size;
char *name;
struct kmem_cache *slab;
mempool_t *pool;
};
#define SP(x) { .size = x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
SP(8),
SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP
struct kmem_cache *scsi_sdb_cache;
/*
* When to reinvoke queueing after a resource shortage. It's 3 msecs to
* not change behaviour from the previous unplug mechanism; experimentation
* may prove this needs changing.
*/
#define SCSI_QUEUE_DELAY 3
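/*
* Block further queueing at whichever level (host, target or device)
* reported the shortage, according to the SCSI_MLQUEUE_* reason code.
*/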
static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_device *device = cmd->device;
struct scsi_target *starget = scsi_target(device);
/*
* Set the appropriate busy bit for the device/host.
*
* If the host/device isn't busy, assume that something actually
* completed, and that we should be able to queue a command now.
*
* Note that the prior mid-layer assumption that any host could
* always queue at least one command is now broken. The mid-layer
* will implement a user specifiable stall (see
* scsi_host.max_host_blocked and scsi_device.max_device_blocked)
* if a command is requeued with no other commands outstanding
* either for the device or for the host.
*/
switch (reason) {
case SCSI_MLQUEUE_HOST_BUSY:
atomic_set(&host->host_blocked, host->max_host_blocked);
break;
case SCSI_MLQUEUE_DEVICE_BUSY:
case SCSI_MLQUEUE_EH_RETRY:
atomic_set(&device->device_blocked,
device->max_device_blocked);
break;
case SCSI_MLQUEUE_TARGET_BUSY:
atomic_set(&starget->target_blocked,
starget->max_target_blocked);
break;
}
}
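/*
* Put a blk-mq command back on the requeue list and kick that list so
* the request is retried soon; also drops the sdev reference held for
* the in-flight command.
*/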
static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct request_queue *q = cmd->request->q;
blk_mq_requeue_request(cmd->request);
blk_mq_kick_requeue_list(q);
put_device(&sdev->sdev_gendev);
}
/**
* __scsi_queue_insert - private queue insertion
* @cmd: The SCSI command being requeued
* @reason: The reason for the requeue
* @unbusy: Whether the queue should be unbusied
*
* This is a private queue insertion. The public interface
* scsi_queue_insert() always assumes the queue should be unbusied
* because it's always called before the completion. This function is
* for a requeue after completion, which should only occur in this
* file.
*/
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
struct scsi_device *device = cmd->device;
struct request_queue *q = device->request_queue;
unsigned long flags;
SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
"Inserting command %p into mlqueue\n", cmd));
scsi_set_blocked(cmd, reason);
/*
* Decrement the counters, since these commands are no longer
* active on the host/device.
*/
if (unbusy)
scsi_device_unbusy(device);
/*
* Requeue this command. It will go before all other commands
* that are already in the queue. Schedule requeue work under
* lock such that the kblockd_schedule_work() call happens
* before blk_cleanup_queue() finishes.
*/
cmd->result = 0;
if (q->mq_ops) {
scsi_mq_requeue_cmd(cmd);
return;
}
spin_lock_irqsave(q->queue_lock, flags);
blk_requeue_request(q, cmd->request);
kblockd_schedule_work(&device->requeue_work);
spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
* Function: scsi_queue_insert()
*
* Purpose: Insert a command in the midlevel queue.
*
* Arguments: cmd - command that we are adding to queue.
* reason - why we are inserting command to queue.
*
* Lock status: Assumed that lock is not held upon entry.
*
* Returns: Nothing.
*
* Notes: We do this for one of two cases. Either the host is busy
* and it cannot accept any more commands for the time being,
* or the device returned QUEUE_FULL and can accept no more
* commands.
* Notes: This could be called either from an interrupt context or a
* normal process context.
*/
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
__scsi_queue_insert(cmd, reason, 1);
}
/**
* scsi_execute - insert request and wait for the result
* @sdev: scsi device
* @cmd: scsi command
* @data_direction: data direction
* @buffer: data buffer
* @bufflen: len of buffer
* @sense: optional sense buffer
* @timeout: request timeout in jiffies (e.g. 30 * HZ)
* @retries: number of times to retry request
* @flags: flags to OR into the request's cmd_flags
* @resid: optional residual length
*
* Returns the req->errors value, which is the scsi_cmnd result
* field.
*/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, int timeout, int retries, u64 flags,
int *resid)
{
struct request *req;
int write = (data_direction == DMA_TO_DEVICE);
int ret = DRIVER_ERROR << 24;
req = blk_get_request(sdev->request_queue, write, __GFP_RECLAIM);
if (IS_ERR(req))
return ret;
blk_rq_set_block_pc(req);
if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
buffer, bufflen, __GFP_RECLAIM))
goto out;
req->cmd_len = COMMAND_SIZE(cmd[0]);
memcpy(req->cmd, cmd, req->cmd_len);
req->sense = sense;
req->sense_len = 0;
req->retries = retries;
req->timeout = timeout;
req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
/*
* head injection *required* here otherwise quiesce won't work
*/
blk_execute_rq(req->q, NULL, req, 1);
/*
* Some devices (USB mass-storage in particular) may transfer
* garbage data together with a residue indicating that the data
* is invalid. Prevent the garbage from being misinterpreted
* and prevent security leaks by zeroing out the excess data.
*/
if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
if (resid)
*resid = req->resid_len;
ret = req->errors;
out:
blk_put_request(req);
return ret;
}
EXPORT_SYMBOL(scsi_execute);
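/*
* Illustrative sketch (not part of the original source): a caller that
* already holds a reference on @sdev could issue a TEST UNIT READY
* through scsi_execute() roughly like this:
*
*	unsigned char cdb[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
*	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
*	int result = scsi_execute(sdev, cdb, DMA_NONE, NULL, 0, sense,
*				  30 * HZ, 3, 0, NULL);
*
* A zero result means the command succeeded; otherwise the host, driver
* and status bytes can be picked apart with the host_byte(),
* driver_byte() and status_byte() helpers.
*/
/**
* scsi_execute_req_flags - scsi_execute() with the sense buffer
* allocated here and normalized into @sshdr for the caller.
*/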
int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
struct scsi_sense_hdr *sshdr, int timeout, int retries,
int *resid, u64 flags)
{
char *sense = NULL;
int result;
if (sshdr) {
sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
if (!sense)
return DRIVER_ERROR << 24;
}
result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
sense, timeout, retries, flags, resid);
if (sshdr)
scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
kfree(sense);
return result;
}
EXPORT_SYMBOL(scsi_execute_req_flags);
/*
* Function: scsi_init_cmd_errh()
*
* Purpose: Initialize cmd fields related to error handling.
*
* Arguments: cmd - command that is ready to be queued.
*
* Notes: This function has the job of initializing a number of
* fields related to error handling. Typically this will
* be called once for each command, as required.
*/
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
cmd->serial_number = 0;
scsi_set_resid(cmd, 0);
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
if (cmd->cmd_len == 0)
cmd->cmd_len = scsi_command_size(cmd->cmnd);
}
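/*
* Drop the host, target and device busy counts for a command that has
* left the LLD, and wake the error handler if it is waiting for the
* host to drain.
*/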
void scsi_device_unbusy(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
struct scsi_target *starget = scsi_target(sdev);
unsigned long flags;
atomic_dec(&shost->host_busy);
if (starget->can_queue > 0)
atomic_dec(&starget->target_busy);
if (unlikely(scsi_host_in_recovery(shost) &&
(shost->host_failed || shost->host_eh_scheduled))) {
spin_lock_irqsave(shost->host_lock, flags);
scsi_eh_wakeup(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
}
atomic_dec(&sdev->device_busy);
}
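/* Restart a queue via blk-mq or the legacy request_fn path, as appropriate. */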
static void scsi_kick_queue(struct request_queue *q)
{
if (q->mq_ops)
blk_mq_start_hw_queues(q);
else
blk_run_queue(q);
}
/*
* Called for single_lun devices on IO completion. Clear starget_sdev_user,
* and call blk_run_queue for all the scsi_devices on the target -
* including current_sdev first.
*
* Called with *no* scsi locks held.
*/
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
struct Scsi_Host *shost = current_sdev->host;
struct scsi_device *sdev, *tmp;
struct scsi_target *starget = scsi_target(current_sdev);
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
starget->starget_sdev_user = NULL;
spin_unlock_irqrestore(shost->host_lock, flags);
/*
* Call blk_run_queue for all LUNs on the target, starting with
* current_sdev. We race with others (to set starget_sdev_user),
* but in most cases, we will be first. Ideally, each LU on the
* target would get some limited time or requests on the target.
*/
scsi_kick_queue(current_sdev->request_queue);
spin_lock_irqsave(shost->host_lock, flags);
if (starget->starget_sdev_user)
goto out;
list_for_each_entry_safe(sdev, tmp, &starget->devices,
same_target_siblings) {
if (sdev == current_sdev)
continue;
if (scsi_device_get(sdev))
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_kick_queue(sdev->request_queue);
spin_lock_irqsave(shost->host_lock, flags);
scsi_device_put(sdev);
}
out:
spin_unlock_irqrestore(shost->host_lock, flags);
}
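/*
* The three busy checks below gate dispatch: a level counts as busy when
* its in-flight count has reached its depth limit or while it is
* temporarily blocked (see scsi_set_blocked() above).
*/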
static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
return true;
if (atomic_read(&sdev->device_blocked) > 0)
return true;
return false;
}
static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
if (starget->can_queue > 0) {
if (atomic_read(&starget->target_busy) >= starget->can_queue)
return true;
if (atomic_read(&starget->target_blocked) > 0)
return true;
}
return false;
}
static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
if (shost->can_queue > 0 &&
atomic_read(&shost->host_busy) >= shost->can_queue)
return true;
if (atomic_read(&shost->host_blocked) > 0)
return true;
if (shost->host_self_blocked)
return true;
return false;
}
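/*
* Restart the queue of every starved device that can now make progress,
* giving up early if the host itself has become busy again.
*/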
static void scsi_starved_list_run(struct Scsi_Host *shost)
{
LIST_HEAD(starved_list);
struct scsi_device *sdev;
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
list_splice_init(&shost->starved_list, &starved_list);
while (!list_empty(&starved_list)) {
struct request_queue *slq;
/*
* As long as shost is accepting commands and we have
* starved queues, call blk_run_queue. scsi_request_fn
* drops the queue_lock and can add us back to the
* starved_list.
*
* host_lock protects the starved_list and starved_entry.
* scsi_request_fn must get the host_lock before checking
* or modifying starved_list or starved_entry.
*/
if (scsi_host_is_busy(shost))
break;
sdev = list_entry(starved_list.next,
struct scsi_device, starved_entry);
list_del_init(&sdev->starved_entry);
if (scsi_target_is_busy(scsi_target(sdev))) {
list_move_tail(&sdev->starved_entry,
&shost->starved_list);
continue;
}
/*
* Once we drop the host lock, a racing scsi_remove_device()
* call may remove the sdev from the starved list and destroy
* it and the queue. Mitigate by taking a reference to the
* queue and never touching the sdev again after we drop the
* host lock. Note: if __scsi_remove_device() invokes
* blk_cleanup_queue() before the queue is run from this
* function then blk_run_queue() will return immediately since
* blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
*/
slq = sdev->request_queue;
if (!blk_get_queue(slq))
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_kick_queue(slq);
blk_put_queue(slq);
spin_lock_irqsave(shost->host_lock, flags);
}
/* put any unprocessed entries back */
list_splice(&starved_list, &shost->starved_list);
spin_unlock_irqrestore(shost->host_lock, flags);
}
/*
* Function: scsi_run_queue()
*
* Purpose: Select a proper request queue to serve next
*
* Arguments: q - last request's queue
*
* Returns: Nothing
*
* Notes: The previous command was completely finished, start
* a new one if possible.
*/
static void scsi_run_queue(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
if (scsi_target(sdev)->single_lun)
scsi_single_lun_run(sdev);
if (!list_empty(&sdev->host->starved_list))
scsi_starved_list_run(sdev->host);
if (q->mq_ops)
blk_mq_start_stopped_hw_queues(q, false);
else
blk_run_queue(q);
}
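/* Work handler behind sdev->requeue_work: runs the device's queue. */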
void scsi_requeue_run_queue(struct work_struct *work)
{
struct scsi_device *sdev;
struct request_queue *q;
sdev = container_of(work, struct scsi_device, requeue_work);
q = sdev->request_queue;
scsi_run_queue(q);
}
/*
* Function: scsi_requeue_command()
*
* Purpose: Handle post-processing of completed commands.
*
* Arguments: q - queue to operate on
* cmd - command that may need to be requeued.
*
* Returns: Nothing
*
* Notes: After command completion, there may be blocks left
* over which weren't finished by the previous command;
* this can be for a number of reasons - the main one is
* I/O errors in the middle of the request, in which case
* we need to request the blocks that come after the bad
* sector.
* Notes: Upon return, cmd is a stale pointer.
*/
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct request *req = cmd->request;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
blk_unprep_request(req);
req->special = NULL;
scsi_put_command(cmd);
blk_requeue_request(q, req);
spin_unlock_irqrestore(q->queue_lock, flags);
scsi_run_queue(q);
put_device(&sdev->sdev_gendev);
}
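/* Run every device queue on the host, e.g. once the host is unblocked. */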
void scsi_run_host_queues(struct Scsi_Host *shost)
{
struct scsi_device *sdev;
shost_for_each_device(sdev, shost)
scsi_run_queue(sdev->request_queue);
}
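/*
* Map a segment count to the smallest pool that fits it. For example,
* nents == 24 gives get_count_order(24) == 5, hence index 2, i.e. the
* sgpool-32 entry of scsi_sg_pools[].
*/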
static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
unsigned int index;
BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
if (nents <= 8)
index = 0;
else
index = get_count_order(nents) - 3;
return index;
}
static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
struct scsi_host_sg_pool *sgp;
sgp = scsi_sg_pools + scsi_sgtable_index(nents);
mempool_free(sgl, sgp->pool);
}
static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
struct scsi_host_sg_pool *sgp;
sgp = scsi_sg_pools + scsi_sgtable_index(nents);
return mempool_alloc(sgp->pool, gfp_mask);
}
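/*
* Free a (possibly chained) scatterlist table. For blk-mq the first
* chunk is embedded in the pre-allocated command and must stay out of
* the mempools; a table that never outgrew the inline chunk needs no
* freeing at all.
*/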
static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
{
if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS)
return;
__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
}
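/*
* Allocate a scatterlist table for @nents segments. For blk-mq the
* inline table embedded in the command is used directly when it is big
* enough; otherwise it becomes the first chunk of a chained table built
* from the sg mempools.
*/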
static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
{
struct scatterlist *first_chunk = NULL;
int ret;
BUG_ON(!nents);
if (mq) {
if (nents <= SCSI_MAX_SG_SEGMENTS) {
sdb->table.nents = sdb->table.orig_nents = nents;
sg_init_table(sdb->table.sgl, nents);
return 0;
}
first_chunk = sdb->table.sgl;
}
ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
first_chunk, GFP_ATOMIC, scsi_sg_alloc);
if (unlikely(ret))
scsi_free_sgtable(sdb, mq);
return ret;
}
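/* Let the upper-level driver undo whatever its ->init_command hook did. */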
static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
if (cmd->request->cmd_type == REQ_TYPE_FS) {
struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
if (drv->uninit_command)
drv->uninit_command(cmd);
}
}
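/* Free the data, bidi and protection scatterlists of a blk-mq command. */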
static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
if (cmd->sdb.table.nents)
scsi_free_sgtable(&cmd->sdb, true);
if (cmd->request->next_rq && cmd->request->next_rq->special)
scsi_free_sgtable(cmd->request->next_rq->special, true);
if (scsi_prot_sg_count(cmd))
scsi_free_sgtable(cmd->prot_sdb, true);
}
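/*
* blk-mq counterpart of the legacy cleanup path: release scatterlists,
* give the ULD its uninit callback, and unlink the command from the
* per-device list on hosts that use cmd_list tracking.
*/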
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct Scsi_Host *shost = sdev->host;
unsigned long flags;
scsi_mq_free_sgtables(cmd);
scsi_uninit_cmd(cmd);
if (shost->use_cmd_list) {
BUG_ON(list_empty(&cmd->list));
spin_lock_irqsave(&sdev->list_lock, flags);
list_del_init(&cmd->list);
spin_unlock_irqrestore(&sdev->list_lock, flags);
}
}
/*
* Function: scsi_release_buffers()
*
* Purpose: Free resources allocated for a scsi_command.
*
* Arguments: cmd - command that we are bailing.
*
* Lock status: Assumed that no lock is held upon entry.
*
* Returns: Nothing
*
* Notes: In the event that an upper level driver rejects a
* command, we must release resources allocated during
* the __init_io() function. Primarily this would involve
* the scatter-gather table.
*/
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
if (cmd->sdb.table.nents)
scsi_free_sgtable(&cmd->sdb, false);
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
if (scsi_prot_sg_count(cmd))
scsi_free_sgtable(cmd->prot_sdb, false);
}
EXPORT_SYMBOL(scsi_release_buffers);
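/* Free the second (inbound) data buffer of a bidirectional request. */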
static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
{
struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
scsi_free_sgtable(bidi_sdb, false);
kmem_cache_free(scsi_sdb_cache, bidi_sdb);
cmd->request->next_rq->special = NULL;
}
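/*
* scsi_end_request - complete @bytes of @req (and @bidi_bytes of its
* paired bidi request).
*
* Returns true if the request is not yet fully completed, false once
* the request has been finished and the command, its buffers and the
* device reference have all been released.
*/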
static bool scsi_end_request(struct request *req, int error,
unsigned int bytes, unsigned int bidi_bytes)
{
struct scsi_cmnd *cmd = req->special;
struct scsi_device *sdev = cmd->device;
struct request_queue *q = sdev->request_queue;
if (blk_update_request(req, error, bytes))
return true;
/* Bidi request must be completed as a whole */
if (unlikely(bidi_bytes) &&
blk_update_request(req->next_rq, error, bidi_bytes))
return true;
if (blk_queue_add_random(q))
add_disk_randomness(req->rq_disk);
if (req->mq_ctx) {
/*
* In the MQ case the command gets freed by __blk_mq_end_request,
* so we have to do all cleanup that depends on it earlier.
*
* We also can't kick the queues from irq context, so we
* will have to defer it to a workqueue.
*/
scsi_mq_uninit_cmd(cmd);
__blk_mq_end_request(req, error);
if (scsi_target(sdev)->single_lun ||
!list_empty(&sdev->host->starved_list))
kblockd_schedule_work(&sdev->requeue_work);
else
blk_mq_start_stopped_hw_queues(q, true);
} else {
unsigned long flags;
if (bidi_bytes)
scsi_release_bidi_buffers(cmd);
spin_lock_irqsave(q->queue_lock, flags);
blk_finish_request(req, error);
spin_unlock_irqrestore(q->queue_lock, flags);
scsi_release_buffers(cmd);
scsi_put_command(cmd);
scsi_run_queue(q);
}
put_device(&sdev->sdev_gendev);
return false;
}
/**
* __scsi_error_from_host_byte - translate SCSI error code into errno
* @cmd: SCSI command (unused)
* @result: scsi error code
*
* Translate SCSI error code into standard UNIX errno.
* Return values:
* -ENOLINK temporary transport failure
* -EREMOTEIO permanent target failure, do not retry
* -EBADE permanent nexus failure, retry on other path
* -ENOSPC No write space available
* -ENODATA Medium error
* -EIO unspecified I/O error
*/
static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
int error = 0;
switch(host_byte(result)) {
case DID_TRANSPORT_FAILFAST:
error = -ENOLINK;
break;
case DID_TARGET_FAILURE:
set_host_byte(cmd, DID_OK);
error = -EREMOTEIO;
break;
case DID_NEXUS_FAILURE:
set_host_byte(cmd, DID_OK);
error = -EBADE;
break;
case DID_ALLOC_FAILURE:
set_host_byte(cmd, DID_OK);
error = -ENOSPC;
break;
case DID_MEDIUM_ERROR:
set_host_byte(cmd, DID_OK);
error = -ENODATA;
break;
default:
error = -EIO;
break;
}
return error;
}
/*
* Function: scsi_io_completion()
*
* Purpose: Completion processing for block device I/O requests.
*
* Arguments: cmd - command that is finished.
*
* Lock status: Assumed that no lock is held upon entry.
*
* Returns: Nothing
*
* Notes: We will finish off the specified number of sectors. If we
* are done, the command block will be released and the queue
* function will be goosed. If we are not done then we have to
* figure out what to do next:
*
* a) We can call scsi_requeue_command(). The request
* will be unprepared and put back on the queue. Then
* a new command will be created for it. This should
* be used if we made forward progress, or if we want
* to switch from READ(10) to READ(6) for example.
*
* b) We can call __scsi_queue_insert(). The request will
* be put back on the queue and retried using the same
* command as before, possibly after a delay.
*
* c) We can call scsi_end_request() with -EIO to fail
* the remainder of the request.
*/
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
int result = cmd->result;
struct request_queue *q = cmd->device->request_queue;
struct request *req = cmd->request;
int error = 0;
struct scsi_sense_hdr sshdr;
bool sense_valid = false;
int sense_deferred = 0, level = 0;
enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
ACTION_DELAYED_RETRY} action;
unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
if (result) {
sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
if (sense_valid)
sense_deferred = scsi_sense_is_deferred(&sshdr);
}
if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
if (result) {
if (sense_valid && req->sense) {
/*
* SG_IO wants current and deferred errors
*/
int len = 8 + cmd->sense_buffer[7];
if (len > SCSI_SENSE_BUFFERSIZE)
len = SCSI_SENSE_BUFFERSIZE;
memcpy(req->sense, cmd->sense_buffer, len);
req->sense_len = len;
}
if (!sense_deferred)
error = __scsi_error_from_host_byte(cmd, result);
}
/*
* __scsi_error_from_host_byte may have reset the host_byte
*/
req->errors = cmd->result;
req->resid_len = scsi_get_resid(cmd);
if (scsi_bidi_cmnd(cmd)) {
/*
* Bidi commands must be completed as a whole,
* both sides at once.
*/
req->next_rq->resid_len = scsi_in(cmd)->resid;
if (scsi_end_request(req, 0, blk_rq_bytes(req),
blk_rq_bytes(req->next_rq)))
BUG();
return;
}
} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
/*
* Certain non BLOCK_PC requests are commands that don't
* actually transfer anything (FLUSH), so cannot use
* good_bytes != blk_rq_bytes(req) as the signal for an error.
* This sets the error explicitly for the problem case.
*/
error = __scsi_error_from_host_byte(cmd, result);
}
/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
BUG_ON(blk_bidi_rq(req));
/*
* Next deal with any sectors which we were able to correctly
* handle.
*/
SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
"%u sectors total, %d bytes done.\n",
blk_rq_sectors(req), good_bytes));
/*
* Recovered errors need reporting, but they're always treated
* as success, so fiddle the result code here. For BLOCK_PC
* we already took a copy of the original into rq->errors, which
* is what gets returned to the user.
*/
if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
* print since caller wants ATA registers. Only occurs on
* SCSI ATA PASS_THROUGH commands when CK_COND=1
*/
if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
;
else if (!(req->cmd_flags & REQ_QUIET))
scsi_print_sense(cmd);
result = 0;
/* BLOCK_PC may have set error */
error = 0;
}
/*
* If we finished all bytes in the request we are done now.
*/
if (!scsi_end_request(req, error, good_bytes, 0))
return;
/*
* Kill the remainder if no retries remain.
*/
if (error && scsi_noretry_cmd(cmd)) {
if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
BUG();
return;
}
/*
* If there had been no error, but we have leftover bytes in the
* request, just queue the command up again.
*/
if (result == 0)
goto requeue;
error = __scsi_error_from_host_byte(cmd, result);
if (host_byte(result) == DID_RESET) {
/* Third party bus reset or reset for error recovery
* reasons. Just retry the command and see what
* happens.
*/
action = ACTION_RETRY;
} else if (sense_valid && !sense_deferred) {
switch (sshdr.sense_key) {
case UNIT_ATTENTION:
if (cmd->device->removable) {
/* Detected disc change. Set a bit
* and quietly refuse further access.
*/
cmd->device->changed = 1;
action = ACTION_FAIL;
} else {
/* Must have been a power glitch, or a
* bus reset. Could not have been a
* media change, so we just retry the
* command and see what happens.
*/
action = ACTION_RETRY;
}
break;
case ILLEGAL_REQUEST:
/* If we had an ILLEGAL REQUEST returned, then
* we may have performed an unsupported
* command. Typically this would be a ten
* byte read where only a six
* byte read was supported. Also, on a system
* where READ CAPACITY failed, we may have
* read past the end of the disk.
*/
if ((cmd->device->use_10_for_rw &&
sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
(cmd->cmnd[0] == READ_10 ||
cmd->cmnd[0] == WRITE_10)) {
/* This will issue a new 6-byte command. */
cmd->device->use_10_for_rw = 0;
action = ACTION_REPREP;
} else if (sshdr.asc == 0x10) /* DIX */ {
action = ACTION_FAIL;
error = -EILSEQ;
/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
action = ACTION_FAIL;
error = -EREMOTEIO;
} else
action = ACTION_FAIL;
break;
case ABORTED_COMMAND:
action = ACTION_FAIL;
if (sshdr.asc == 0x10) /* DIF */
error = -EILSEQ;
break;
case NOT_READY:
/* If the device is in the process of becoming
* ready, or has a temporary blockage, retry.
*/
if (sshdr.asc == 0x04) {
switch (sshdr.ascq) {
case 0x01: /* becoming ready */
case 0x04: /* format in progress */