This patch ensures a fair distribution of buffers, when two AIMs share a
single channel. The AIMs then won't be able to use more than half of all
pre-allocated buffers of the linked channel. However, in case the channel
is not shared, the AIM can exclusively use all available buffers.

Signed-off-by: Andrey Shvetsov <andrey.shvetsov@k2l.de>
Signed-off-by: Christian Gromm <christian.gromm@microchip.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
}
mutex_unlock(&channel->io_mutex);
}
mutex_unlock(&channel->io_mutex);
- mbo = most_get_mbo(channel->iface, channel->channel_id);
+ mbo = most_get_mbo(channel->iface, channel->channel_id, &cdev_aim);
if (!mbo) {
if ((filp->f_flags & O_NONBLOCK))
if (!mbo) {
if ((filp->f_flags & O_NONBLOCK))
if (wait_event_interruptible(
channel->wq,
(mbo = most_get_mbo(channel->iface,
if (wait_event_interruptible(
channel->wq,
(mbo = most_get_mbo(channel->iface,
- channel->channel_id)) ||
+ channel->channel_id,
+ &cdev_aim)) ||
(!channel->dev)))
return -ERESTARTSYS;
}
(!channel->dev)))
return -ERESTARTSYS;
}
- mbo = most_get_mbo(nd->iface, nd->tx.ch_id);
+ mbo = most_get_mbo(nd->iface, nd->tx.ch_id, &aim);
if (!mbo) {
netif_stop_queue(dev);
if (!mbo) {
netif_stop_queue(dev);
wait_event_interruptible(
channel->playback_waitq,
kthread_should_stop() ||
wait_event_interruptible(
channel->playback_waitq,
kthread_should_stop() ||
- (mbo = most_get_mbo(channel->iface, channel->id)));
+ (mbo = most_get_mbo(channel->iface, channel->id,
+ &audio_aim)));
- mbo = most_get_mbo(&dev->most_iface, dev->atx_idx);
+ mbo = most_get_mbo(&dev->most_iface, dev->atx_idx, NULL);
static struct device *class_glue_dir;
static struct ida mdev_id;
static int modref;
static struct device *class_glue_dir;
static struct ida mdev_id;
static int modref;
+static int dummy_num_buffers;
struct most_c_obj {
struct kobject kobj;
struct most_c_obj {
struct kobject kobj;
struct most_aim *second_aim;
int first_aim_refs;
int second_aim_refs;
struct most_aim *second_aim;
int first_aim_refs;
int second_aim_refs;
+ int first_num_buffers;
+ int second_num_buffers;
struct list_head trash_fifo;
struct task_struct *hdm_enqueue_task;
struct mutex stop_task_mutex;
struct list_head trash_fifo;
struct task_struct *hdm_enqueue_task;
struct mutex stop_task_mutex;
}
spin_lock_irqsave(&c->fifo_lock, flags);
}
spin_lock_irqsave(&c->fifo_lock, flags);
+ ++*mbo->num_buffers_ptr;
list_add_tail(&mbo->list, &c->fifo);
spin_unlock_irqrestore(&c->fifo_lock, flags);
list_add_tail(&mbo->list, &c->fifo);
spin_unlock_irqrestore(&c->fifo_lock, flags);
goto _error1;
}
mbo->complete = compl;
goto _error1;
}
mbo->complete = compl;
+ mbo->num_buffers_ptr = &dummy_num_buffers;
if (dir == MOST_CH_RX) {
nq_hdm_mbo(mbo);
atomic_inc(&c->mbo_nq_level);
if (dir == MOST_CH_RX) {
nq_hdm_mbo(mbo);
atomic_inc(&c->mbo_nq_level);
* This attempts to get a free buffer out of the channel fifo.
* Returns a pointer to MBO on success or NULL otherwise.
*/
* This attempts to get a free buffer out of the channel fifo.
* Returns a pointer to MBO on success or NULL otherwise.
*/
-struct mbo *most_get_mbo(struct most_interface *iface, int id)
+struct mbo *most_get_mbo(struct most_interface *iface, int id,
+ struct most_aim *aim)
{
struct mbo *mbo;
struct most_c_obj *c;
unsigned long flags;
{
struct mbo *mbo;
struct most_c_obj *c;
unsigned long flags;
+	int *num_buffers_ptr;
c = get_channel_by_iface(iface, id);
if (unlikely(!c))
return NULL;
c = get_channel_by_iface(iface, id);
if (unlikely(!c))
return NULL;
+
+ if (c->first_aim_refs && c->second_aim_refs &&
+ ((aim == c->first_aim && c->first_num_buffers <= 0) ||
+ (aim == c->second_aim && c->second_num_buffers <= 0)))
+ return NULL;
+
+ if (aim == c->first_aim)
+ num_buffers_ptr = &c->first_num_buffers;
+ else if (aim == c->second_aim)
+ num_buffers_ptr = &c->second_num_buffers;
+ else
+ num_buffers_ptr = &dummy_num_buffers;
+
spin_lock_irqsave(&c->fifo_lock, flags);
if (list_empty(&c->fifo)) {
spin_unlock_irqrestore(&c->fifo_lock, flags);
return NULL;
}
mbo = list_pop_mbo(&c->fifo);
spin_lock_irqsave(&c->fifo_lock, flags);
if (list_empty(&c->fifo)) {
spin_unlock_irqrestore(&c->fifo_lock, flags);
return NULL;
}
mbo = list_pop_mbo(&c->fifo);
spin_unlock_irqrestore(&c->fifo_lock, flags);
spin_unlock_irqrestore(&c->fifo_lock, flags);
+
+ mbo->num_buffers_ptr = num_buffers_ptr;
mbo->buffer_length = c->cfg.buffer_size;
return mbo;
}
mbo->buffer_length = c->cfg.buffer_size;
return mbo;
}
goto error;
c->is_starving = 0;
goto error;
c->is_starving = 0;
+ c->first_num_buffers = c->cfg.num_buffers / 2;
+ c->second_num_buffers = c->cfg.num_buffers - c->first_num_buffers;
atomic_set(&c->mbo_ref, num_buffer);
out:
atomic_set(&c->mbo_ref, num_buffer);
out:
void *priv;
struct list_head list;
struct most_interface *ifp;
void *priv;
struct list_head list;
struct most_interface *ifp;
u16 hdm_channel_id;
void *virt_address;
dma_addr_t bus_address;
u16 hdm_channel_id;
void *virt_address;
dma_addr_t bus_address;
void most_resume_enqueue(struct most_interface *iface, int channel_idx);
int most_register_aim(struct most_aim *aim);
int most_deregister_aim(struct most_aim *aim);
void most_resume_enqueue(struct most_interface *iface, int channel_idx);
int most_register_aim(struct most_aim *aim);
int most_deregister_aim(struct most_aim *aim);
-struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx);
+struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx,
+ struct most_aim *);
void most_put_mbo(struct mbo *mbo);
int most_start_channel(struct most_interface *iface, int channel_idx,
struct most_aim *);
void most_put_mbo(struct mbo *mbo);
int most_start_channel(struct most_interface *iface, int channel_idx,
struct most_aim *);