Merge branch 'acpi-bind'
[firefly-linux-kernel-4.4.55.git] / drivers/dma/ste_dma40.c
index 71bf4ec300ea5ca10958c0a43f71c9e9175b7879..5ab5880d5c9041203bdb38a4d242c888777e8f2d 100644
@@ -17,6 +17,8 @@
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
 #include <linux/amba/bus.h>
 #include <linux/regulator/consumer.h>
 #include <linux/platform_data/dma-ste-dma40.h>
 #define D40_LCLA_LINK_PER_EVENT_GRP 128
 #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
 
+/* Max number of logical channels per physical channel */
+#define D40_MAX_LOG_CHAN_PER_PHY 32
+
 /* Attempts before giving up to trying to get pages that are aligned */
 #define MAX_LCLA_ALLOC_ATTEMPTS 256
 
 /* Bit markings for allocation map */
-#define D40_ALLOC_FREE         (1 << 31)
-#define D40_ALLOC_PHY          (1 << 30)
+#define D40_ALLOC_FREE         BIT(31)
+#define D40_ALLOC_PHY          BIT(30)
 #define D40_ALLOC_LOG_FREE     0
 
-#define MAX(a, b) (((a) < (b)) ? (b) : (a))
+#define D40_MEMCPY_MAX_CHANS   8
+
+/* Reserved event lines for memcpy only. */
+#define DB8500_DMA_MEMCPY_EV_0 51
+#define DB8500_DMA_MEMCPY_EV_1 56
+#define DB8500_DMA_MEMCPY_EV_2 57
+#define DB8500_DMA_MEMCPY_EV_3 58
+#define DB8500_DMA_MEMCPY_EV_4 59
+#define DB8500_DMA_MEMCPY_EV_5 60
+
+static int dma40_memcpy_channels[] = {
+       DB8500_DMA_MEMCPY_EV_0,
+       DB8500_DMA_MEMCPY_EV_1,
+       DB8500_DMA_MEMCPY_EV_2,
+       DB8500_DMA_MEMCPY_EV_3,
+       DB8500_DMA_MEMCPY_EV_4,
+       DB8500_DMA_MEMCPY_EV_5,
+};
+
+/* Default configuration for physical memcpy */
+static struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
+       .mode = STEDMA40_MODE_PHYSICAL,
+       .dir = DMA_MEM_TO_MEM,
+
+       .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+       .src_info.psize = STEDMA40_PSIZE_PHY_1,
+       .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+
+       .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+       .dst_info.psize = STEDMA40_PSIZE_PHY_1,
+       .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+};
+
+/* Default configuration for logical memcpy */
+static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
+       .mode = STEDMA40_MODE_LOGICAL,
+       .dir = DMA_MEM_TO_MEM,
+
+       .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+       .src_info.psize = STEDMA40_PSIZE_LOG_1,
+       .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+
+       .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+       .dst_info.psize = STEDMA40_PSIZE_LOG_1,
+       .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+};
 
 /**
  * enum 40_command - The different commands and/or statuses.
@@ -171,6 +221,9 @@ static u32 d40_backup_regs_chan[] = {
        D40_CHAN_REG_SDLNK,
 };
 
+#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
+                            BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)
+
 /**
  * struct d40_interrupt_lookup - lookup table for interrupt handler
  *
@@ -471,6 +524,8 @@ struct d40_gen_dmac {
  * @phy_start: Physical memory start of the DMA registers.
  * @phy_size: Size of the DMA register map.
  * @irq: The IRQ number.
+ * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
+ * transfers).
  * @num_phy_chans: The number of physical channels. Read from HW. This
  * is the number of available channels for this driver, not counting "Secure
  * mode" allocated physical channels.
@@ -514,6 +569,7 @@ struct d40_base {
        phys_addr_t                       phy_start;
        resource_size_t                   phy_size;
        int                               irq;
+       int                               num_memcpy_chans;
        int                               num_phy_chans;
        int                               num_log_chans;
        struct device_dma_parameters      dma_parms;
@@ -534,7 +590,7 @@ struct d40_base {
        resource_size_t                   lcpa_size;
        struct kmem_cache                *desc_slab;
        u32                               reg_val_backup[BACKUP_REGS_SZ];
-       u32                               reg_val_backup_v4[MAX(BACKUP_REGS_SZ_V4A, BACKUP_REGS_SZ_V4B)];
+       u32                               reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
        u32                              *reg_val_backup_chan;
        u16                               gcc_pwr_off_mask;
        bool                              initialized;
@@ -792,7 +848,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
                 * that uses linked lists.
                 */
                if (!(chan->phy_chan->use_soft_lli &&
-                       chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM))
+                       chan->dma_cfg.dir == DMA_DEV_TO_MEM))
                        curr_lcla = d40_lcla_alloc_one(chan, desc);
 
                first_lcla = curr_lcla;
@@ -954,20 +1010,21 @@ static int d40_psize_2_burst_size(bool is_log, int psize)
 
 /*
  * The dma only supports transmitting packages up to
- * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
- * dma elements required to send the entire sg list
+ * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in Bytes.
+ *
+ * Calculate the total number of dma elements required to send the entire sg list.
  */
 static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
 {
        int dmalen;
        u32 max_w = max(data_width1, data_width2);
        u32 min_w = min(data_width1, data_width2);
-       u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
+       u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
 
        if (seg_max > STEDMA40_MAX_SEG_SIZE)
-               seg_max -= (1 << max_w);
+               seg_max -= max_w;
 
-       if (!IS_ALIGNED(size, 1 << max_w))
+       if (!IS_ALIGNED(size, max_w))
                return -EINVAL;
 
        if (size <= seg_max)
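
A stand-alone sketch of the byte-based segment math introduced above. It assumes STEDMA40_MAX_SEG_SIZE is 0xFFFF (the 16-bit element-counter limit from dma-ste-dma40.h) and reconstructs the round-up tail of d40_size_2_dmalen(), which falls outside this hunk; both are assumptions, not part of the patch.

/*
 * Sketch only: mirrors the calculation above with data widths in bytes.
 * STEDMA40_MAX_SEG_SIZE = 0xFFFF and the final round-up are assumptions;
 * ALIGN() copies the kernel helper for power-of-two alignments.
 */
#include <stdio.h>

#define STEDMA40_MAX_SEG_SIZE	0xFFFF
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

static int sketch_size_2_dmalen(int size, unsigned int data_width1,
				unsigned int data_width2)
{
	unsigned int max_w = data_width1 > data_width2 ? data_width1 : data_width2;
	unsigned int min_w = data_width1 < data_width2 ? data_width1 : data_width2;
	unsigned int seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= max_w;

	if (size % max_w)
		return -1;			/* stands in for -EINVAL */

	if (size <= seg_max)
		return 1;

	return (size + seg_max - 1) / seg_max;	/* DIV_ROUND_UP(size, seg_max) */
}

int main(void)
{
	/* 1-byte source, 4-byte destination, 256 KiB transfer -> 5 elements. */
	printf("dmalen = %d\n", sketch_size_2_dmalen(256 * 1024, 1, 4));
	return 0;
}
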
@@ -1257,21 +1314,17 @@ static void __d40_config_set_event(struct d40_chan *d40c,
 static void d40_config_set_event(struct d40_chan *d40c,
                                 enum d40_events event_type)
 {
-       /* Enable event line connected to device (or memcpy) */
-       if ((d40c->dma_cfg.dir ==  STEDMA40_PERIPH_TO_MEM) ||
-           (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
-               u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
+       u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
 
+       /* Enable event line connected to device (or memcpy) */
+       if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
+           (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
                __d40_config_set_event(d40c, event_type, event,
                                       D40_CHAN_REG_SSLNK);
-       }
-
-       if (d40c->dma_cfg.dir !=  STEDMA40_PERIPH_TO_MEM) {
-               u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
 
+       if (d40c->dma_cfg.dir !=  DMA_DEV_TO_MEM)
                __d40_config_set_event(d40c, event_type, event,
                                       D40_CHAN_REG_SDLNK);
-       }
 }
 
 static u32 d40_chan_has_events(struct d40_chan *d40c)
@@ -1417,7 +1470,7 @@ static u32 d40_residue(struct d40_chan *d40c)
                          >> D40_SREG_ELEM_PHY_ECNT_POS;
        }
 
-       return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
+       return num_elt * d40c->dma_cfg.dst_info.data_width;
 }
 
 static bool d40_tx_is_linked(struct d40_chan *d40c)
@@ -1693,7 +1746,7 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
                }
 
                /* ACK interrupt */
-               writel(1 << idx, base->virtbase + il[row].clr);
+               writel(BIT(idx), base->virtbase + il[row].clr);
 
                spin_lock(&d40c->lock);
 
@@ -1715,8 +1768,6 @@ static int d40_validate_conf(struct d40_chan *d40c,
                             struct stedma40_chan_cfg *conf)
 {
        int res = 0;
-       u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
-       u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
        bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
 
        if (!conf->dir) {
@@ -1724,48 +1775,14 @@ static int d40_validate_conf(struct d40_chan *d40c,
                res = -EINVAL;
        }
 
-       if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
-           d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
-           d40c->runtime_addr == 0) {
-
-               chan_err(d40c, "Invalid TX channel address (%d)\n",
-                        conf->dst_dev_type);
-               res = -EINVAL;
-       }
-
-       if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
-           d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
-           d40c->runtime_addr == 0) {
-               chan_err(d40c, "Invalid RX channel address (%d)\n",
-                       conf->src_dev_type);
-               res = -EINVAL;
-       }
-
-       if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
-           dst_event_group == STEDMA40_DEV_DST_MEMORY) {
-               chan_err(d40c, "Invalid dst\n");
+       if ((is_log && conf->dev_type > d40c->base->num_log_chans)  ||
+           (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
+           (conf->dev_type < 0)) {
+               chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
                res = -EINVAL;
        }
 
-       if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
-           src_event_group == STEDMA40_DEV_SRC_MEMORY) {
-               chan_err(d40c, "Invalid src\n");
-               res = -EINVAL;
-       }
-
-       if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
-           dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
-               chan_err(d40c, "No event line\n");
-               res = -EINVAL;
-       }
-
-       if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
-           (src_event_group != dst_event_group)) {
-               chan_err(d40c, "Invalid event group\n");
-               res = -EINVAL;
-       }
-
-       if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
+       if (conf->dir == DMA_DEV_TO_DEV) {
                /*
                 * DMAC HW supports it. Will be added to this driver,
                 * in case any dma client requires it.
@@ -1775,9 +1792,9 @@ static int d40_validate_conf(struct d40_chan *d40c,
        }
 
        if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
-           (1 << conf->src_info.data_width) !=
+           conf->src_info.data_width !=
            d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
-           (1 << conf->dst_info.data_width)) {
+           conf->dst_info.data_width) {
                /*
                 * The DMAC hardware only supports
                 * src (burst x width) == dst (burst x width)
@@ -1819,8 +1836,8 @@ static bool d40_alloc_mask_set(struct d40_phy_res *phy,
                if (phy->allocated_src == D40_ALLOC_FREE)
                        phy->allocated_src = D40_ALLOC_LOG_FREE;
 
-               if (!(phy->allocated_src & (1 << log_event_line))) {
-                       phy->allocated_src |= 1 << log_event_line;
+               if (!(phy->allocated_src & BIT(log_event_line))) {
+                       phy->allocated_src |= BIT(log_event_line);
                        goto found;
                } else
                        goto not_found;
@@ -1831,8 +1848,8 @@ static bool d40_alloc_mask_set(struct d40_phy_res *phy,
                if (phy->allocated_dst == D40_ALLOC_FREE)
                        phy->allocated_dst = D40_ALLOC_LOG_FREE;
 
-               if (!(phy->allocated_dst & (1 << log_event_line))) {
-                       phy->allocated_dst |= 1 << log_event_line;
+               if (!(phy->allocated_dst & BIT(log_event_line))) {
+                       phy->allocated_dst |= BIT(log_event_line);
                        goto found;
                } else
                        goto not_found;
@@ -1862,11 +1879,11 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
 
        /* Logical channel */
        if (is_src) {
-               phy->allocated_src &= ~(1 << log_event_line);
+               phy->allocated_src &= ~BIT(log_event_line);
                if (phy->allocated_src == D40_ALLOC_LOG_FREE)
                        phy->allocated_src = D40_ALLOC_FREE;
        } else {
-               phy->allocated_dst &= ~(1 << log_event_line);
+               phy->allocated_dst &= ~BIT(log_event_line);
                if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
                        phy->allocated_dst = D40_ALLOC_FREE;
        }
@@ -1882,7 +1899,7 @@ out:
 
 static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
 {
-       int dev_type;
+       int dev_type = d40c->dma_cfg.dev_type;
        int event_group;
        int event_line;
        struct d40_phy_res *phys;
@@ -1896,14 +1913,12 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
        phys = d40c->base->phy_res;
        num_phy_chans = d40c->base->num_phy_chans;
 
-       if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
-               dev_type = d40c->dma_cfg.src_dev_type;
+       if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
                log_num = 2 * dev_type;
                is_src = true;
-       } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
-                  d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
+       } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
+                  d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
                /* dst event lines are used for logical memcpy */
-               dev_type = d40c->dma_cfg.dst_dev_type;
                log_num = 2 * dev_type + 1;
                is_src = false;
        } else
@@ -1913,7 +1928,7 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
        event_line = D40_TYPE_TO_EVENT(dev_type);
 
        if (!is_log) {
-               if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
+               if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
                        /* Find physical half channel */
                        if (d40c->dma_cfg.use_fixed_channel) {
                                i = d40c->dma_cfg.phy_channel;
@@ -2014,14 +2029,23 @@ static int d40_config_memcpy(struct d40_chan *d40c)
        dma_cap_mask_t cap = d40c->chan.device->cap_mask;
 
        if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
-               d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
-               d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
-               d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
-                       memcpy[d40c->chan.chan_id];
+               d40c->dma_cfg = dma40_memcpy_conf_log;
+               d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
+
+               d40_log_cfg(&d40c->dma_cfg,
+                           &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
 
        } else if (dma_has_cap(DMA_MEMCPY, cap) &&
                   dma_has_cap(DMA_SLAVE, cap)) {
-               d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
+               d40c->dma_cfg = dma40_memcpy_conf_phy;
+
+               /* Generate interrupt at end of transfer or relink. */
+               d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
+
+               /* Generate interrupt on error. */
+               d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
+               d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
+
        } else {
                chan_err(d40c, "No memcpy\n");
                return -EINVAL;
@@ -2034,7 +2058,7 @@ static int d40_free_dma(struct d40_chan *d40c)
 {
 
        int res = 0;
-       u32 event;
+       u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
        struct d40_phy_res *phy = d40c->phy_chan;
        bool is_src;
 
@@ -2052,14 +2076,12 @@ static int d40_free_dma(struct d40_chan *d40c)
                return -EINVAL;
        }
 
-       if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
-           d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
-               event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
+       if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
+           d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
                is_src = false;
-       } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
-               event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
+       else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
                is_src = true;
-       } else {
+       else {
                chan_err(d40c, "Unknown direction\n");
                return -EINVAL;
        }
@@ -2100,7 +2122,7 @@ static bool d40_is_paused(struct d40_chan *d40c)
        unsigned long flags;
        void __iomem *active_reg;
        u32 status;
-       u32 event;
+       u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
 
        spin_lock_irqsave(&d40c->lock, flags);
 
@@ -2119,12 +2141,10 @@ static bool d40_is_paused(struct d40_chan *d40c)
                goto _exit;
        }
 
-       if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
-           d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
-               event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
+       if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
+           d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
                status = readl(chanbase + D40_CHAN_REG_SDLNK);
-       } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
-               event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
+       } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
                status = readl(chanbase + D40_CHAN_REG_SSLNK);
        } else {
                chan_err(d40c, "Unknown direction\n");
@@ -2255,24 +2275,6 @@ err:
        return NULL;
 }
 
-static dma_addr_t
-d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
-{
-       struct stedma40_platform_data *plat = chan->base->plat_data;
-       struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
-       dma_addr_t addr = 0;
-
-       if (chan->runtime_addr)
-               return chan->runtime_addr;
-
-       if (direction == DMA_DEV_TO_MEM)
-               addr = plat->dev_rx[cfg->src_dev_type];
-       else if (direction == DMA_MEM_TO_DEV)
-               addr = plat->dev_tx[cfg->dst_dev_type];
-
-       return addr;
-}
-
 static struct dma_async_tx_descriptor *
 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
            struct scatterlist *sg_dst, unsigned int sg_len,
@@ -2299,14 +2301,10 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
        if (sg_next(&sg_src[sg_len - 1]) == sg_src)
                desc->cyclic = true;
 
-       if (direction != DMA_TRANS_NONE) {
-               dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
-
-               if (direction == DMA_DEV_TO_MEM)
-                       src_dev_addr = dev_addr;
-               else if (direction == DMA_MEM_TO_DEV)
-                       dst_dev_addr = dev_addr;
-       }
+       if (direction == DMA_DEV_TO_MEM)
+               src_dev_addr = chan->runtime_addr;
+       else if (direction == DMA_MEM_TO_DEV)
+               dst_dev_addr = chan->runtime_addr;
 
        if (chan_is_logical(chan))
                ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
@@ -2366,7 +2364,7 @@ static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
        u32 rtreg;
        u32 event = D40_TYPE_TO_EVENT(dev_type);
        u32 group = D40_TYPE_TO_GROUP(dev_type);
-       u32 bit = 1 << event;
+       u32 bit = BIT(event);
        u32 prioreg;
        struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
 
@@ -2397,13 +2395,57 @@ static void d40_set_prio_realtime(struct d40_chan *d40c)
        if (d40c->base->rev < 3)
                return;
 
-       if ((d40c->dma_cfg.dir ==  STEDMA40_PERIPH_TO_MEM) ||
-           (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
-               __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);
+       if ((d40c->dma_cfg.dir ==  DMA_DEV_TO_MEM) ||
+           (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
+               __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
 
-       if ((d40c->dma_cfg.dir ==  STEDMA40_MEM_TO_PERIPH) ||
-           (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
-               __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
+       if ((d40c->dma_cfg.dir ==  DMA_MEM_TO_DEV) ||
+           (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
+               __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
+}
+
+#define D40_DT_FLAGS_MODE(flags)       ((flags >> 0) & 0x1)
+#define D40_DT_FLAGS_DIR(flags)        ((flags >> 1) & 0x1)
+#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
+#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
+
+static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
+                                 struct of_dma *ofdma)
+{
+       struct stedma40_chan_cfg cfg;
+       dma_cap_mask_t cap;
+       u32 flags;
+
+       memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));
+
+       dma_cap_zero(cap);
+       dma_cap_set(DMA_SLAVE, cap);
+
+       cfg.dev_type = dma_spec->args[0];
+       flags = dma_spec->args[2];
+
+       switch (D40_DT_FLAGS_MODE(flags)) {
+       case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
+       case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
+       }
+
+       switch (D40_DT_FLAGS_DIR(flags)) {
+       case 0:
+               cfg.dir = DMA_MEM_TO_DEV;
+               cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
+               break;
+       case 1:
+               cfg.dir = DMA_DEV_TO_MEM;
+               cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
+               break;
+       }
+
+       if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
+               cfg.phy_channel = dma_spec->args[1];
+               cfg.use_fixed_channel = true;
+       }
+
+       return dma_request_channel(cap, stedma40_filter, &cfg);
 }
 
 /* DMA ENGINE functions */
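
The third cell of the client's DMA specifier (dma_spec->args[2] above) packs mode, direction, endianness and fixed-channel selection into single-bit flags. Below is a minimal decoding sketch that mirrors the D40_DT_FLAGS_* macros; the sample flags value is hypothetical.

/*
 * Sketch only: decodes a hypothetical flags cell using the same bit
 * layout as the D40_DT_FLAGS_* macros above.
 *   bit 0: 0 = logical channel, 1 = physical channel
 *   bit 1: 0 = memory-to-device, 1 = device-to-memory
 *   bit 2: peripheral is big-endian
 *   bit 3: use the fixed physical channel given in the second cell
 */
#include <stdio.h>

int main(void)
{
	unsigned int flags = 0x2;	/* hypothetical: logical, dev-to-mem */

	printf("mode:       %s\n", (flags >> 0) & 0x1 ? "physical" : "logical");
	printf("direction:  %s\n", (flags >> 1) & 0x1 ? "DMA_DEV_TO_MEM"
						      : "DMA_MEM_TO_DEV");
	printf("big endian: %u\n", (flags >> 2) & 0x1);
	printf("fixed chan: %u\n", (flags >> 3) & 0x1);
	return 0;
}
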
@@ -2435,23 +2477,21 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
        }
 
        pm_runtime_get_sync(d40c->base->dev);
-       /* Fill in basic CFG register values */
-       d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
-                   &d40c->dst_def_cfg, chan_is_logical(d40c));
 
        d40_set_prio_realtime(d40c);
 
        if (chan_is_logical(d40c)) {
-               d40_log_cfg(&d40c->dma_cfg,
-                           &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
-
-               if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
+               if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
                        d40c->lcpa = d40c->base->lcpa_base +
-                               d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
+                               d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
                else
                        d40c->lcpa = d40c->base->lcpa_base +
-                               d40c->dma_cfg.dst_dev_type *
+                               d40c->dma_cfg.dev_type *
                                D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
+
+               /* Unmask the Global Interrupt Mask. */
+               d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
+               d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
        }
 
        dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
@@ -2641,33 +2681,10 @@ static void d40_terminate_all(struct dma_chan *chan)
 static int
 dma40_config_to_halfchannel(struct d40_chan *d40c,
                            struct stedma40_half_channel_info *info,
-                           enum dma_slave_buswidth width,
                            u32 maxburst)
 {
-       enum stedma40_periph_data_width addr_width;
        int psize;
 
-       switch (width) {
-       case DMA_SLAVE_BUSWIDTH_1_BYTE:
-               addr_width = STEDMA40_BYTE_WIDTH;
-               break;
-       case DMA_SLAVE_BUSWIDTH_2_BYTES:
-               addr_width = STEDMA40_HALFWORD_WIDTH;
-               break;
-       case DMA_SLAVE_BUSWIDTH_4_BYTES:
-               addr_width = STEDMA40_WORD_WIDTH;
-               break;
-       case DMA_SLAVE_BUSWIDTH_8_BYTES:
-               addr_width = STEDMA40_DOUBLEWORD_WIDTH;
-               break;
-       default:
-               dev_err(d40c->base->dev,
-                       "illegal peripheral address width "
-                       "requested (%d)\n",
-                       width);
-               return -EINVAL;
-       }
-
        if (chan_is_logical(d40c)) {
                if (maxburst >= 16)
                        psize = STEDMA40_PSIZE_LOG_16;
@@ -2688,7 +2705,6 @@ dma40_config_to_halfchannel(struct d40_chan *d40c,
                        psize = STEDMA40_PSIZE_PHY_1;
        }
 
-       info->data_width = addr_width;
        info->psize = psize;
        info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
 
@@ -2712,21 +2728,14 @@ static int d40_set_runtime_config(struct dma_chan *chan,
        dst_maxburst = config->dst_maxburst;
 
        if (config->direction == DMA_DEV_TO_MEM) {
-               dma_addr_t dev_addr_rx =
-                       d40c->base->plat_data->dev_rx[cfg->src_dev_type];
-
                config_addr = config->src_addr;
-               if (dev_addr_rx)
-                       dev_dbg(d40c->base->dev,
-                               "channel has a pre-wired RX address %08x "
-                               "overriding with %08x\n",
-                               dev_addr_rx, config_addr);
-               if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
+
+               if (cfg->dir != DMA_DEV_TO_MEM)
                        dev_dbg(d40c->base->dev,
                                "channel was not configured for peripheral "
                                "to memory transfer (%d) overriding\n",
                                cfg->dir);
-               cfg->dir = STEDMA40_PERIPH_TO_MEM;
+               cfg->dir = DMA_DEV_TO_MEM;
 
                /* Configure the memory side */
                if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
@@ -2735,21 +2744,14 @@ static int d40_set_runtime_config(struct dma_chan *chan,
                        dst_maxburst = src_maxburst;
 
        } else if (config->direction == DMA_MEM_TO_DEV) {
-               dma_addr_t dev_addr_tx =
-                       d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
-
                config_addr = config->dst_addr;
-               if (dev_addr_tx)
-                       dev_dbg(d40c->base->dev,
-                               "channel has a pre-wired TX address %08x "
-                               "overriding with %08x\n",
-                               dev_addr_tx, config_addr);
-               if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
+
+               if (cfg->dir != DMA_MEM_TO_DEV)
                        dev_dbg(d40c->base->dev,
                                "channel was not configured for memory "
                                "to peripheral transfer (%d) overriding\n",
                                cfg->dir);
-               cfg->dir = STEDMA40_MEM_TO_PERIPH;
+               cfg->dir = DMA_MEM_TO_DEV;
 
                /* Configure the memory side */
                if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
@@ -2763,6 +2765,11 @@ static int d40_set_runtime_config(struct dma_chan *chan,
                return -EINVAL;
        }
 
+       if (config_addr <= 0) {
+               dev_err(d40c->base->dev, "no address supplied\n");
+               return -EINVAL;
+       }
+
        if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
                dev_err(d40c->base->dev,
                        "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
@@ -2781,14 +2788,24 @@ static int d40_set_runtime_config(struct dma_chan *chan,
                src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
        }
 
+       /* Only valid widths are 1, 2, 4 and 8. */
+       if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
+           src_addr_width >  DMA_SLAVE_BUSWIDTH_8_BYTES   ||
+           dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
+           dst_addr_width >  DMA_SLAVE_BUSWIDTH_8_BYTES   ||
+           ((src_addr_width > 1) && (src_addr_width & 1)) ||
+           ((dst_addr_width > 1) && (dst_addr_width & 1)))
+               return -EINVAL;
+
+       cfg->src_info.data_width = src_addr_width;
+       cfg->dst_info.data_width = dst_addr_width;
+
        ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
-                                         src_addr_width,
                                          src_maxburst);
        if (ret)
                return ret;
 
        ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
-                                         dst_addr_width,
                                          dst_maxburst);
        if (ret)
                return ret;
@@ -2797,8 +2814,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
        if (chan_is_logical(d40c))
                d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
        else
-               d40_phy_cfg(cfg, &d40c->src_def_cfg,
-                           &d40c->dst_def_cfg, false);
+               d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
 
        /* These settings will take precedence later */
        d40c->runtime_addr = config_addr;
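
The runtime configuration requires source and destination to move the same number of bytes per burst (maxburst x addr_width); the derivation visible above fills in src_maxburst from the dst side when only the latter is known. A stand-alone sketch with hypothetical numbers:

/*
 * Sketch only: the (maxburst x addr_width) balancing enforced above.
 * Hypothetical values: a 4-byte destination bursting 8 beats paired
 * with a 2-byte source whose burst is left for the driver to derive.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int dst_addr_width = 4, dst_maxburst = 8;
	unsigned int src_addr_width = 2, src_maxburst;

	/* Same derivation as in d40_set_runtime_config() above. */
	src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;

	assert(src_maxburst * src_addr_width ==
	       dst_maxburst * dst_addr_width);
	printf("src_maxburst = %u\n", src_maxburst);	/* prints 16 */
	return 0;
}
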
@@ -2929,7 +2945,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
        }
 
        d40_chan_init(base, &base->dma_memcpy, base->log_chans,
-                     base->num_log_chans, base->plat_data->memcpy_len);
+                     base->num_log_chans, base->num_memcpy_chans);
 
        dma_cap_zero(base->dma_memcpy.cap_mask);
        dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
@@ -3123,13 +3139,14 @@ static int __init d40_phy_res_init(struct d40_base *base)
 
 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 {
-       struct stedma40_platform_data *plat_data;
+       struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
        struct clk *clk = NULL;
        void __iomem *virtbase = NULL;
        struct resource *res = NULL;
        struct d40_base *base = NULL;
        int num_log_chans = 0;
        int num_phy_chans;
+       int num_memcpy_chans;
        int clk_ret = -EINVAL;
        int i;
        u32 pid;
@@ -3189,8 +3206,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
         * DB8540v1 has revision 4
         */
        rev = AMBA_REV_BITS(pid);
-
-       plat_data = pdev->dev.platform_data;
+       if (rev < 2) {
+               d40_err(&pdev->dev, "hardware revision: %d is not supported", rev);
+               goto failure;
+       }
 
        /* The number of physical channels on this HW */
        if (plat_data->num_of_phy_chans)
@@ -3198,26 +3217,20 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
        else
                num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
 
-       dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n",
-                rev, res->start, num_phy_chans);
-
-       if (rev < 2) {
-               d40_err(&pdev->dev, "hardware revision: %d is not supported",
-                       rev);
-               goto failure;
-       }
+       /* The number of channels used for memcpy */
+       if (plat_data->num_of_memcpy_chans)
+               num_memcpy_chans = plat_data->num_of_memcpy_chans;
+       else
+               num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);
 
-       /* Count the number of logical channels in use */
-       for (i = 0; i < plat_data->dev_len; i++)
-               if (plat_data->dev_rx[i] != 0)
-                       num_log_chans++;
+       num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
 
-       for (i = 0; i < plat_data->dev_len; i++)
-               if (plat_data->dev_tx[i] != 0)
-                       num_log_chans++;
+       dev_info(&pdev->dev,
+                "hardware rev: %d @ 0x%x with %d physical and %d logical channels\n",
+                rev, res->start, num_phy_chans, num_log_chans);
 
        base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
-                      (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
+                      (num_phy_chans + num_log_chans + num_memcpy_chans) *
                       sizeof(struct d40_chan), GFP_KERNEL);
 
        if (base == NULL) {
@@ -3227,6 +3240,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 
        base->rev = rev;
        base->clk = clk;
+       base->num_memcpy_chans = num_memcpy_chans;
        base->num_phy_chans = num_phy_chans;
        base->num_log_chans = num_log_chans;
        base->phy_start = res->start;
@@ -3278,17 +3292,11 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
        if (!base->lookup_phy_chans)
                goto failure;
 
-       if (num_log_chans + plat_data->memcpy_len) {
-               /*
-                * The max number of logical channels are event lines for all
-                * src devices and dst devices
-                */
-               base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
-                                                sizeof(struct d40_chan *),
-                                                GFP_KERNEL);
-               if (!base->lookup_log_chans)
-                       goto failure;
-       }
+       base->lookup_log_chans = kzalloc(num_log_chans *
+                                        sizeof(struct d40_chan *),
+                                        GFP_KERNEL);
+       if (!base->lookup_log_chans)
+               goto failure;
 
        base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
                                            sizeof(d40_backup_regs_chan),
@@ -3472,17 +3480,82 @@ failure:
        return ret;
 }
 
+static int __init d40_of_probe(struct platform_device *pdev,
+                              struct device_node *np)
+{
+       struct stedma40_platform_data *pdata;
+       int num_phy = 0, num_memcpy = 0, num_disabled = 0;
+       const __be32 *list;
+
+       pdata = devm_kzalloc(&pdev->dev,
+                            sizeof(struct stedma40_platform_data),
+                            GFP_KERNEL);
+       if (!pdata)
+               return -ENOMEM;
+
+       /* If absent this value will be obtained from h/w. */
+       of_property_read_u32(np, "dma-channels", &num_phy);
+       if (num_phy > 0)
+               pdata->num_of_phy_chans = num_phy;
+
+       list = of_get_property(np, "memcpy-channels", &num_memcpy);
+       num_memcpy /= sizeof(*list);
+
+       if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
+               d40_err(&pdev->dev,
+                       "Invalid number of memcpy channels specified (%d)\n",
+                       num_memcpy);
+               return -EINVAL;
+       }
+       pdata->num_of_memcpy_chans = num_memcpy;
+
+       of_property_read_u32_array(np, "memcpy-channels",
+                                  dma40_memcpy_channels,
+                                  num_memcpy);
+
+       list = of_get_property(np, "disabled-channels", &num_disabled);
+       num_disabled /= sizeof(*list);
+
+       if (num_disabled > STEDMA40_MAX_PHYS || num_disabled < 0) {
+               d40_err(&pdev->dev,
+                       "Invalid number of disabled channels specified (%d)\n",
+                       num_disabled);
+               return -EINVAL;
+       }
+
+       of_property_read_u32_array(np, "disabled-channels",
+                                  pdata->disabled_channels,
+                                  num_disabled);
+       pdata->disabled_channels[num_disabled] = -1;
+
+       pdev->dev.platform_data = pdata;
+
+       return 0;
+}
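
d40_of_probe() translates three optional properties (dma-channels, memcpy-channels, disabled-channels) into the platform data consumed by the rest of the probe path. The sketch below shows the result for a hypothetical node; the property values and the reduced struct are illustrative only, not taken from the patch.

/*
 * Sketch only: the platform data d40_of_probe() would build for a
 * hypothetical node with
 *   dma-channels      = <8>;
 *   memcpy-channels   = <56 57 58 60>;
 *   disabled-channels = <12>;
 * The struct below is a reduced stand-in for stedma40_platform_data,
 * carrying only the fields touched above.
 */
#include <stdio.h>

struct pdata_sketch {
	int num_of_phy_chans;
	int num_of_memcpy_chans;
	int disabled_channels[8];	/* -1 terminated, as above */
};

int main(void)
{
	struct pdata_sketch pdata = {
		.num_of_phy_chans = 8,
		.num_of_memcpy_chans = 4,	/* 16 bytes / sizeof(__be32) */
		.disabled_channels = { 12, -1 },
	};

	/* The first four entries of dma40_memcpy_channels[] would now read 56 57 58 60. */
	printf("%d phy channels, %d memcpy channels, disabled: %d\n",
	       pdata.num_of_phy_chans, pdata.num_of_memcpy_chans,
	       pdata.disabled_channels[0]);
	return 0;
}
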
+
 static int __init d40_probe(struct platform_device *pdev)
 {
-       int err;
+       struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
+       struct device_node *np = pdev->dev.of_node;
        int ret = -ENOENT;
-       struct d40_base *base;
+       struct d40_base *base = NULL;
        struct resource *res = NULL;
        int num_reserved_chans;
        u32 val;
 
-       base = d40_hw_detect_init(pdev);
+       if (!plat_data) {
+               if (np) {
+                       if (d40_of_probe(pdev, np)) {
+                               ret = -ENOMEM;
+                               goto failure;
+                       }
+               } else {
+                       d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
+                       goto failure;
+               }
+       }
 
+       base = d40_hw_detect_init(pdev);
        if (!base)
                goto failure;
 
@@ -3575,6 +3648,7 @@ static int __init d40_probe(struct platform_device *pdev)
                base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
                if (IS_ERR(base->lcpa_regulator)) {
                        d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
+                       ret = PTR_ERR(base->lcpa_regulator);
                        base->lcpa_regulator = NULL;
                        goto failure;
                }
@@ -3590,19 +3664,26 @@ static int __init d40_probe(struct platform_device *pdev)
        }
 
        base->initialized = true;
-       err = d40_dmaengine_init(base, num_reserved_chans);
-       if (err)
+       ret = d40_dmaengine_init(base, num_reserved_chans);
+       if (ret)
                goto failure;
 
        base->dev->dma_parms = &base->dma_parms;
-       err = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
-       if (err) {
+       ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
+       if (ret) {
                d40_err(&pdev->dev, "Failed to set dma max seg size\n");
                goto failure;
        }
 
        d40_hw_init(base);
 
+       if (np) {
+               ret = of_dma_controller_register(np, d40_xlate, NULL);
+               if (ret)
+                       dev_err(&pdev->dev,
+                               "could not register of_dma_controller\n");
+       }
+
        dev_info(base->dev, "initialized\n");
        return 0;
 
@@ -3656,11 +3737,17 @@ failure:
        return ret;
 }
 
+static const struct of_device_id d40_match[] = {
+        { .compatible = "stericsson,dma40", },
+        {}
+};
+
 static struct platform_driver d40_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name  = D40_NAME,
                .pm = DMA40_PM_OPS,
+               .of_match_table = d40_match,
        },
 };