return;
}
- /* Find the first not transferred desciptor */
+ /* Find the first descriptor that has not been transferred yet */
list_for_each_entry(desc, &sh_chan->ld_queue, node)
if (desc->mark == DESC_SUBMITTED) {
dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
__le32 word3;
};
-/* RFD desciptor */
+/* RFD descriptor */
struct atl1c_rx_free_desc {
__le64 buffer_addr;
};
struct atl1c_buffer *buffer_info;
};
-/* receive return desciptor (rrd) ring */
+/* receive return descriptor (rrd) ring */
struct atl1c_rrd_ring {
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
/* GEM requires that RX descriptors are provided four at a time,
* aligned. Also, the RX ring may not wrap around. This means that
- * there will be at least 4 unused desciptor entries in the middle
+ * there will be at least 4 unused descriptor entries in the middle
* of the RX ring at all times.
*
* Similar to HME, GEM assumes that it can write garbage bytes before
/************************ Target Mode Definitions *****************************/
/*
- * Connection desciptor for select-in requests in target mode.
+ * Connection descriptor for select-in requests in target mode.
*/
struct target_cmd {
uint8_t scsiid; /* Our ID and the initiator's ID */
/************************ Target Mode Definitions *****************************/
/*
- * Connection desciptor for select-in requests in target mode.
+ * Connection descriptor for select-in requests in target mode.
*/
struct target_cmd {
uint8_t scsiid; /* Our ID and the initiator's ID */
EXPORT_SYMBOL_GPL(wusbhc_rh_status_data);
/*
- * Return the hub's desciptor
+ * Return the hub's descriptor
*
* NOTE: almost cut and paste from ehci-hub.c
*