 	.llseek		= noop_llseek,
 };
 
+/*
+ * The controllers use an inline buffer instead of a mapped SGL for small,
+ * single entry buffers.  Note that we treat a zero-length transfer like
+ * a mapped SGL.
+ */
+static bool twa_command_mapped(struct scsi_cmnd *cmd)
+{
+	return scsi_sg_count(cmd) != 1 ||
+		scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
+}
+
 /* This function will complete an aen request from the isr */
 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
 {
@@ ... @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
 				}
 
 				/* Now complete the io */
-				scsi_dma_unmap(cmd);
+				if (twa_command_mapped(cmd))
+					scsi_dma_unmap(cmd);
 				cmd->scsi_done(cmd);
 				tw_dev->state[request_id] = TW_S_COMPLETED;
 				twa_free_request_id(tw_dev, request_id);
@@ ... @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
 				struct scsi_cmnd *cmd = tw_dev->srb[i];
 
 				cmd->result = (DID_RESET << 16);
-				scsi_dma_unmap(cmd);
+				if (twa_command_mapped(cmd))
+					scsi_dma_unmap(cmd);
 				cmd->scsi_done(cmd);
 			}
 		}
@@ ... @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
 	switch (retval) {
 	case SCSI_MLQUEUE_HOST_BUSY:
-		scsi_dma_unmap(SCpnt);
+		if (twa_command_mapped(SCpnt))
+			scsi_dma_unmap(SCpnt);
 		twa_free_request_id(tw_dev, request_id);
 		break;
 	case 1:
 		SCpnt->result = (DID_ERROR << 16);
-		scsi_dma_unmap(SCpnt);
+		if (twa_command_mapped(SCpnt))
+			scsi_dma_unmap(SCpnt);
 		done(SCpnt);
 		tw_dev->state[request_id] = TW_S_COMPLETED;
 		twa_free_request_id(tw_dev, request_id);
@@ ... @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
 		/* Map sglist from scsi layer to cmd packet */
 		if (scsi_sg_count(srb)) {
-			if ((scsi_sg_count(srb) == 1) &&
-			    (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
+			if (!twa_command_mapped(srb)) {
 				if (srb->sc_data_direction == DMA_TO_DEVICE ||
 				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
 					scsi_sg_copy_to_buffer(srb,
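The hunk above is cut off mid-call in this excerpt. For orientation only, a hedged sketch of how the copy into the per-request inline (bounce) buffer presumably continues; tw_dev->generic_buffer_virt and TW_SECTOR_SIZE are quoted from memory of the upstream 3w-9xxx.c and should be treated as assumptions here:

/* Assumed continuation (sketch, not part of the patch): for writes,
 * copy the single small SG entry into the controller-visible buffer. */
if (srb->sc_data_direction == DMA_TO_DEVICE ||
    srb->sc_data_direction == DMA_BIDIRECTIONAL)
	scsi_sg_copy_to_buffer(srb,
			       tw_dev->generic_buffer_virt[request_id],
			       TW_SECTOR_SIZE);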
@@ ... @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
 {
 	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
 
-	if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
+	if (!twa_command_mapped(cmd) &&
 	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
 	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
 		if (scsi_sg_count(cmd) == 1) {
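The excerpt ends here, just as the copy-back branch opens. Symmetrically to the write path, a hedged sketch of the remainder (again from memory of the upstream driver; the names are assumptions in this context): data for a read served out of the inline buffer is copied back into the command's scatterlist before completion:

/* Assumed remainder (sketch, not part of the patch): for reads served
 * from the inline buffer, copy the data back into the lone SG entry. */
unsigned long flags;
void *buf = tw_dev->generic_buffer_virt[request_id];

local_irq_save(flags);
scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
local_irq_restore(flags);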