X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=drivers%2Fspi%2Fspi.c;h=095cfaded1c0dd084111af40d0e0a2c70ebe32d7;hb=0ffd56dafe02b40ec2a7a61e8c74b0061add0677;hp=a93551bc5b7c6793e8fbf532f76f0ea375c7d878;hpb=23694c36211931ac9a716619a4151c3a59e403fa;p=firefly-linux-kernel-4.4.55.git

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a93551bc5b7c..095cfaded1c0 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1,7 +1,8 @@
 /*
- * spi.c - SPI init/core code
+ * SPI init/core code
  *
  * Copyright (C) 2005 David Brownell
+ * Copyright (C) 2008 Secret Lab Technologies Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -19,16 +20,24 @@
  */

 #include
+#include
 #include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
-#include
+#include
 #include
+#include
+#include
+#include
+#include
+#include
+#include

 static void spidev_release(struct device *dev)
 {
@@ -47,7 +56,7 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
 {
 	const struct spi_device	*spi = to_spi_device(dev);

-	return sprintf(buf, "%s\n", spi->modalias);
+	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
 }

 static struct device_attribute spi_dev_attrs[] = {
@@ -87,6 +96,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
 	if (of_driver_match_device(dev, drv))
 		return 1;

+	/* Then try ACPI */
+	if (acpi_driver_match_device(dev, drv))
+		return 1;
+
 	if (sdrv->id_table)
 		return !!spi_match_id(sdrv->id_table, spi);

@@ -210,7 +223,7 @@ static const struct dev_pm_ops spi_pm = {
 	SET_RUNTIME_PM_OPS(
 		pm_generic_runtime_suspend,
 		pm_generic_runtime_resume,
-		pm_generic_runtime_idle
+		NULL
 	)
 };

@@ -321,6 +334,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
 	spi->dev.parent = &master->dev;
 	spi->dev.bus = &spi_bus_type;
 	spi->dev.release = spidev_release;
+	spi->cs_gpio = -ENOENT;
 	device_initialize(&spi->dev);
 	return spi;
 }
@@ -338,15 +352,16 @@ EXPORT_SYMBOL_GPL(spi_alloc_device);
 int spi_add_device(struct spi_device *spi)
 {
 	static DEFINE_MUTEX(spi_add_lock);
-	struct device *dev = spi->master->dev.parent;
+	struct spi_master *master = spi->master;
+	struct device *dev = master->dev.parent;
 	struct device *d;
 	int status;

 	/* Chipselects are numbered 0..max; validate. */
-	if (spi->chip_select >= spi->master->num_chipselect) {
+	if (spi->chip_select >= master->num_chipselect) {
 		dev_err(dev, "cs%d >= max %d\n",
 			spi->chip_select,
-			spi->master->num_chipselect);
+			master->num_chipselect);
 		return -EINVAL;
 	}

@@ -370,6 +385,9 @@ int spi_add_device(struct spi_device *spi)
 		goto done;
 	}

+	if (master->cs_gpios)
+		spi->cs_gpio = master->cs_gpios[spi->chip_select];
+
 	/* Drivers may modify this initial i/o setup, but will
 	 * normally rely on the device being setup.  Devices
 	 * using SPI_CS_HIGH can't coexist well otherwise...
@@ -480,8 +498,7 @@ static void spi_match_master_to_boardinfo(struct spi_master *master,
  * The board info passed can safely be __initdata ... but be careful of
  * any embedded pointers (platform_data, etc), they're copied as-is.
  */
-int __init
-spi_register_board_info(struct spi_board_info const *info, unsigned n)
+int spi_register_board_info(struct spi_board_info const *info, unsigned n)
 {
 	struct boardinfo *bi;
 	int i;
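For illustration only (this example is not part of the patch): the hunk above drops the __init annotation so spi_register_board_info() can also be called after early boot, while the comment still allows the table itself to be __initdata because the core copies it. A hedged sketch of how board code typically feeds this API; the table name, modalias, bus number and speed below are invented values:

#include <linux/kernel.h>
#include <linux/spi/spi.h>

/* Hypothetical board table; contents are illustrative only. */
static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= "m25p80",		/* example flash chip */
		.max_speed_hz	= 20 * 1000 * 1000,	/* 20 MHz */
		.bus_num	= 0,
		.chip_select	= 0,
		.mode		= SPI_MODE_0,
	},
};

static int __init board_register_spi(void)
{
	/* The table is copied by the SPI core, so __initdata is safe. */
	return spi_register_board_info(board_spi_devices,
				       ARRAY_SIZE(board_spi_devices));
}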
@@ -506,6 +523,480 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)

 /*-------------------------------------------------------------------------*/

+/**
+ * spi_pump_messages - kthread work function which processes spi message queue
+ * @work: pointer to kthread work struct contained in the master struct
+ *
+ * This function checks if there is any spi message in the queue that
+ * needs processing and if so call out to the driver to initialize hardware
+ * and transfer each message.
+ *
+ */
+static void spi_pump_messages(struct kthread_work *work)
+{
+	struct spi_master *master =
+		container_of(work, struct spi_master, pump_messages);
+	unsigned long flags;
+	bool was_busy = false;
+	int ret;
+
+	/* Lock queue and check for queue work */
+	spin_lock_irqsave(&master->queue_lock, flags);
+	if (list_empty(&master->queue) || !master->running) {
+		if (!master->busy) {
+			spin_unlock_irqrestore(&master->queue_lock, flags);
+			return;
+		}
+		master->busy = false;
+		spin_unlock_irqrestore(&master->queue_lock, flags);
+		if (master->unprepare_transfer_hardware &&
+		    master->unprepare_transfer_hardware(master))
+			dev_err(&master->dev,
+				"failed to unprepare transfer hardware\n");
+		return;
+	}
+
+	/* Make sure we are not already running a message */
+	if (master->cur_msg) {
+		spin_unlock_irqrestore(&master->queue_lock, flags);
+		return;
+	}
+	/* Extract head of queue */
+	master->cur_msg =
+		list_entry(master->queue.next, struct spi_message, queue);
+
+	list_del_init(&master->cur_msg->queue);
+	if (master->busy)
+		was_busy = true;
+	else
+		master->busy = true;
+	spin_unlock_irqrestore(&master->queue_lock, flags);
+
+	if (!was_busy && master->prepare_transfer_hardware) {
+		ret = master->prepare_transfer_hardware(master);
+		if (ret) {
+			dev_err(&master->dev,
+				"failed to prepare transfer hardware\n");
+			return;
+		}
+	}
+
+	ret = master->transfer_one_message(master, master->cur_msg);
+	if (ret) {
+		dev_err(&master->dev,
+			"failed to transfer one message from queue\n");
+		return;
+	}
+}
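spi_pump_messages() above is what ultimately hands each queued message to the controller driver's transfer_one_message() hook. A minimal sketch, not taken from the patch, of the driver side of that contract; everything prefixed foo_ is hypothetical, and FIFO/DMA programming, clock setup and error handling are omitted:

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

/* Hypothetical controller driver built on the queued core added here. */
static int foo_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* program the controller for this transfer here */
		msg->actual_length += xfer->len;
	}

	msg->status = 0;			/* report success ...          */
	spi_finalize_current_message(master);	/* ... and return it to core   */
	return 0;
}

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;

	master = spi_alloc_master(&pdev->dev, 0);
	if (!master)
		return -ENOMEM;

	master->bus_num = pdev->id;
	master->num_chipselect = 1;
	/* leaving ->transfer unset makes the core install its message queue */
	master->transfer_one_message = foo_transfer_one_message;

	return spi_register_master(master);
}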
+
+static int spi_init_queue(struct spi_master *master)
+{
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+
+	INIT_LIST_HEAD(&master->queue);
+	spin_lock_init(&master->queue_lock);
+
+	master->running = false;
+	master->busy = false;
+
+	init_kthread_worker(&master->kworker);
+	master->kworker_task = kthread_run(kthread_worker_fn,
+					   &master->kworker,
+					   dev_name(&master->dev));
+	if (IS_ERR(master->kworker_task)) {
+		dev_err(&master->dev, "failed to create message pump task\n");
+		return -ENOMEM;
+	}
+	init_kthread_work(&master->pump_messages, spi_pump_messages);
+
+	/*
+	 * Master config will indicate if this controller should run the
+	 * message pump with high (realtime) priority to reduce the transfer
+	 * latency on the bus by minimising the delay between a transfer
+	 * request and the scheduling of the message pump thread. Without this
+	 * setting the message pump thread will remain at default priority.
+	 */
+	if (master->rt) {
+		dev_info(&master->dev,
+			"will run message pump with realtime priority\n");
+		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
+	}
+
+	return 0;
+}
+
+/**
+ * spi_get_next_queued_message() - called by driver to check for queued
+ * messages
+ * @master: the master to check for queued messages
+ *
+ * If there are more messages in the queue, the next message is returned from
+ * this call.
+ */
+struct spi_message *spi_get_next_queued_message(struct spi_master *master)
+{
+	struct spi_message *next;
+	unsigned long flags;
+
+	/* get a pointer to the next message, if any */
+	spin_lock_irqsave(&master->queue_lock, flags);
+	if (list_empty(&master->queue))
+		next = NULL;
+	else
+		next = list_entry(master->queue.next,
+				  struct spi_message, queue);
+	spin_unlock_irqrestore(&master->queue_lock, flags);
+
+	return next;
+}
+EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
+
+/**
+ * spi_finalize_current_message() - the current message is complete
+ * @master: the master to return the message to
+ *
+ * Called by the driver to notify the core that the message in the front of the
+ * queue is complete and can be removed from the queue.
+ */
+void spi_finalize_current_message(struct spi_master *master)
+{
+	struct spi_message *mesg;
+	unsigned long flags;
+
+	spin_lock_irqsave(&master->queue_lock, flags);
+	mesg = master->cur_msg;
+	master->cur_msg = NULL;
+
+	queue_kthread_work(&master->kworker, &master->pump_messages);
+	spin_unlock_irqrestore(&master->queue_lock, flags);
+
+	mesg->state = NULL;
+	if (mesg->complete)
+		mesg->complete(mesg->context);
+}
+EXPORT_SYMBOL_GPL(spi_finalize_current_message);
+
+static int spi_start_queue(struct spi_master *master)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&master->queue_lock, flags);
+
+	if (master->running || master->busy) {
+		spin_unlock_irqrestore(&master->queue_lock, flags);
+		return -EBUSY;
+	}
+
+	master->running = true;
+	master->cur_msg = NULL;
+	spin_unlock_irqrestore(&master->queue_lock, flags);
+
+	queue_kthread_work(&master->kworker, &master->pump_messages);
+
+	return 0;
+}
+
+static int spi_stop_queue(struct spi_master *master)
+{
+	unsigned long flags;
+	unsigned limit = 500;
+	int ret = 0;
+
+	spin_lock_irqsave(&master->queue_lock, flags);
+
+	/*
+	 * This is a bit lame, but is optimized for the common execution path.
+	 * A wait_queue on the master->busy could be used, but then the common
+	 * execution path (pump_messages) would be required to call wake_up or
+	 * friends on every SPI message. Do this instead.
+	 */
+	while ((!list_empty(&master->queue) || master->busy) && limit--) {
+		spin_unlock_irqrestore(&master->queue_lock, flags);
+		msleep(10);
+		spin_lock_irqsave(&master->queue_lock, flags);
+	}
+
+	if (!list_empty(&master->queue) || master->busy)
+		ret = -EBUSY;
+	else
+		master->running = false;
+
+	spin_unlock_irqrestore(&master->queue_lock, flags);
+
+	if (ret) {
+		dev_warn(&master->dev,
+			 "could not stop message queue\n");
+		return ret;
+	}
+	return ret;
+}
+
+static int spi_destroy_queue(struct spi_master *master)
+{
+	int ret;
+
+	ret = spi_stop_queue(master);
+
+	/*
+	 * flush_kthread_worker will block until all work is done.
+	 * If the reason that stop_queue timed out is that the work will never
+	 * finish, then it does no good to call flush/stop thread, so
+	 * return anyway.
+ */ + if (ret) { + dev_err(&master->dev, "problem destroying queue\n"); + return ret; + } + + flush_kthread_worker(&master->kworker); + kthread_stop(master->kworker_task); + + return 0; +} + +/** + * spi_queued_transfer - transfer function for queued transfers + * @spi: spi device which is requesting transfer + * @msg: spi message which is to handled is queued to driver queue + */ +static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct spi_master *master = spi->master; + unsigned long flags; + + spin_lock_irqsave(&master->queue_lock, flags); + + if (!master->running) { + spin_unlock_irqrestore(&master->queue_lock, flags); + return -ESHUTDOWN; + } + msg->actual_length = 0; + msg->status = -EINPROGRESS; + + list_add_tail(&msg->queue, &master->queue); + if (master->running && !master->busy) + queue_kthread_work(&master->kworker, &master->pump_messages); + + spin_unlock_irqrestore(&master->queue_lock, flags); + return 0; +} + +static int spi_master_initialize_queue(struct spi_master *master) +{ + int ret; + + master->queued = true; + master->transfer = spi_queued_transfer; + + /* Initialize and start queue */ + ret = spi_init_queue(master); + if (ret) { + dev_err(&master->dev, "problem initializing queue\n"); + goto err_init_queue; + } + ret = spi_start_queue(master); + if (ret) { + dev_err(&master->dev, "problem starting queue\n"); + goto err_start_queue; + } + + return 0; + +err_start_queue: +err_init_queue: + spi_destroy_queue(master); + return ret; +} + +/*-------------------------------------------------------------------------*/ + +#if defined(CONFIG_OF) +/** + * of_register_spi_devices() - Register child devices onto the SPI bus + * @master: Pointer to spi_master device + * + * Registers an spi_device for each child node of master node which has a 'reg' + * property. + */ +static void of_register_spi_devices(struct spi_master *master) +{ + struct spi_device *spi; + struct device_node *nc; + const __be32 *prop; + char modalias[SPI_NAME_SIZE + 4]; + int rc; + int len; + + if (!master->dev.of_node) + return; + + for_each_available_child_of_node(master->dev.of_node, nc) { + /* Alloc an spi_device */ + spi = spi_alloc_device(master); + if (!spi) { + dev_err(&master->dev, "spi_device alloc error for %s\n", + nc->full_name); + spi_dev_put(spi); + continue; + } + + /* Select device driver */ + if (of_modalias_node(nc, spi->modalias, + sizeof(spi->modalias)) < 0) { + dev_err(&master->dev, "cannot find modalias for %s\n", + nc->full_name); + spi_dev_put(spi); + continue; + } + + /* Device address */ + prop = of_get_property(nc, "reg", &len); + if (!prop || len < sizeof(*prop)) { + dev_err(&master->dev, "%s has no 'reg' property\n", + nc->full_name); + spi_dev_put(spi); + continue; + } + spi->chip_select = be32_to_cpup(prop); + + /* Mode (clock phase/polarity/etc.) 
*/ + if (of_find_property(nc, "spi-cpha", NULL)) + spi->mode |= SPI_CPHA; + if (of_find_property(nc, "spi-cpol", NULL)) + spi->mode |= SPI_CPOL; + if (of_find_property(nc, "spi-cs-high", NULL)) + spi->mode |= SPI_CS_HIGH; + if (of_find_property(nc, "spi-3wire", NULL)) + spi->mode |= SPI_3WIRE; + + /* Device speed */ + prop = of_get_property(nc, "spi-max-frequency", &len); + if (!prop || len < sizeof(*prop)) { + dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n", + nc->full_name); + spi_dev_put(spi); + continue; + } + spi->max_speed_hz = be32_to_cpup(prop); + + /* IRQ */ + spi->irq = irq_of_parse_and_map(nc, 0); + + /* Store a pointer to the node in the device structure */ + of_node_get(nc); + spi->dev.of_node = nc; + + /* Register the new device */ + snprintf(modalias, sizeof(modalias), "%s%s", SPI_MODULE_PREFIX, + spi->modalias); + request_module(modalias); + rc = spi_add_device(spi); + if (rc) { + dev_err(&master->dev, "spi_device register error %s\n", + nc->full_name); + spi_dev_put(spi); + } + + } +} +#else +static void of_register_spi_devices(struct spi_master *master) { } +#endif + +#ifdef CONFIG_ACPI +static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) +{ + struct spi_device *spi = data; + + if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { + struct acpi_resource_spi_serialbus *sb; + + sb = &ares->data.spi_serial_bus; + if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { + spi->chip_select = sb->device_selection; + spi->max_speed_hz = sb->connection_speed; + + if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) + spi->mode |= SPI_CPHA; + if (sb->clock_polarity == ACPI_SPI_START_HIGH) + spi->mode |= SPI_CPOL; + if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) + spi->mode |= SPI_CS_HIGH; + } + } else if (spi->irq < 0) { + struct resource r; + + if (acpi_dev_resource_interrupt(ares, 0, &r)) + spi->irq = r.start; + } + + /* Always tell the ACPI core to skip this resource */ + return 1; +} + +static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, + void *data, void **return_value) +{ + struct spi_master *master = data; + struct list_head resource_list; + struct acpi_device *adev; + struct spi_device *spi; + int ret; + + if (acpi_bus_get_device(handle, &adev)) + return AE_OK; + if (acpi_bus_get_status(adev) || !adev->status.present) + return AE_OK; + + spi = spi_alloc_device(master); + if (!spi) { + dev_err(&master->dev, "failed to allocate SPI device for %s\n", + dev_name(&adev->dev)); + return AE_NO_MEMORY; + } + + ACPI_HANDLE_SET(&spi->dev, handle); + spi->irq = -1; + + INIT_LIST_HEAD(&resource_list); + ret = acpi_dev_get_resources(adev, &resource_list, + acpi_spi_add_resource, spi); + acpi_dev_free_resource_list(&resource_list); + + if (ret < 0 || !spi->max_speed_hz) { + spi_dev_put(spi); + return AE_OK; + } + + strlcpy(spi->modalias, dev_name(&adev->dev), sizeof(spi->modalias)); + if (spi_add_device(spi)) { + dev_err(&master->dev, "failed to add SPI device %s from ACPI\n", + dev_name(&adev->dev)); + spi_dev_put(spi); + } + + return AE_OK; +} + +static void acpi_register_spi_devices(struct spi_master *master) +{ + acpi_status status; + acpi_handle handle; + + handle = ACPI_HANDLE(master->dev.parent); + if (!handle) + return; + + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, + acpi_spi_add_device, NULL, + master, NULL); + if (ACPI_FAILURE(status)) + dev_warn(&master->dev, "failed to enumerate SPI slaves\n"); +} +#else +static inline void acpi_register_spi_devices(struct spi_master *master) {} +#endif /* CONFIG_ACPI */ + static void 
spi_master_release(struct device *dev) { struct spi_master *master; @@ -521,6 +1012,7 @@ static struct class spi_master_class = { }; + /** * spi_alloc_master - allocate SPI master controller * @dev: the controller, possibly using the platform_bus @@ -538,7 +1030,8 @@ static struct class spi_master_class = { * * The caller is responsible for assigning the bus number and initializing * the master's methods before calling spi_register_master(); and (after errors - * adding the device) calling spi_master_put() to prevent a memory leak. + * adding the device) calling spi_master_put() and kfree() to prevent a memory + * leak. */ struct spi_master *spi_alloc_master(struct device *dev, unsigned size) { @@ -552,6 +1045,8 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size) return NULL; device_initialize(&master->dev); + master->bus_num = -1; + master->num_chipselect = 1; master->dev.class = &spi_master_class; master->dev.parent = get_device(dev); spi_master_set_devdata(master, &master[1]); @@ -560,6 +1055,47 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size) } EXPORT_SYMBOL_GPL(spi_alloc_master); +#ifdef CONFIG_OF +static int of_spi_register_master(struct spi_master *master) +{ + int nb, i, *cs; + struct device_node *np = master->dev.of_node; + + if (!np) + return 0; + + nb = of_gpio_named_count(np, "cs-gpios"); + master->num_chipselect = max(nb, (int)master->num_chipselect); + + /* Return error only for an incorrectly formed cs-gpios property */ + if (nb == 0 || nb == -ENOENT) + return 0; + else if (nb < 0) + return nb; + + cs = devm_kzalloc(&master->dev, + sizeof(int) * master->num_chipselect, + GFP_KERNEL); + master->cs_gpios = cs; + + if (!master->cs_gpios) + return -ENOMEM; + + for (i = 0; i < master->num_chipselect; i++) + cs[i] = -ENOENT; + + for (i = 0; i < nb; i++) + cs[i] = of_get_named_gpio(np, "cs-gpios", i); + + return 0; +} +#else +static int of_spi_register_master(struct spi_master *master) +{ + return 0; +} +#endif + /** * spi_register_master - register SPI master controller * @master: initialized master, originally from spi_alloc_master() @@ -591,12 +1127,19 @@ int spi_register_master(struct spi_master *master) if (!dev) return -ENODEV; + status = of_spi_register_master(master); + if (status) + return status; + /* even if it's just one always-selected device, there must * be at least one chipselect */ if (master->num_chipselect == 0) return -EINVAL; + if ((master->bus_num < 0) && master->dev.of_node) + master->bus_num = of_alias_get_id(master->dev.of_node, "spi"); + /* convention: dynamically assigned bus IDs count down from the max */ if (master->bus_num < 0) { /* FIXME switch to an IDR based scheme, something like @@ -620,22 +1163,31 @@ int spi_register_master(struct spi_master *master) dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev), dynamic ? 
" (dynamic)" : ""); + /* If we're using a queued driver, start the queue */ + if (master->transfer) + dev_info(dev, "master is unqueued, this is deprecated\n"); + else { + status = spi_master_initialize_queue(master); + if (status) { + device_unregister(&master->dev); + goto done; + } + } + mutex_lock(&board_lock); list_add_tail(&master->list, &spi_master_list); list_for_each_entry(bi, &board_list, list) spi_match_master_to_boardinfo(master, &bi->board_info); mutex_unlock(&board_lock); - status = 0; - - /* Register devices from the device tree */ + /* Register devices from the device tree and ACPI */ of_register_spi_devices(master); + acpi_register_spi_devices(master); done: return status; } EXPORT_SYMBOL_GPL(spi_register_master); - static int __unregister(struct device *dev, void *null) { spi_unregister_device(to_spi_device(dev)); @@ -656,6 +1208,11 @@ void spi_unregister_master(struct spi_master *master) { int dummy; + if (master->queued) { + if (spi_destroy_queue(master)) + dev_err(&master->dev, "queue remove failed\n"); + } + mutex_lock(&board_lock); list_del(&master->list); mutex_unlock(&board_lock); @@ -665,10 +1222,41 @@ void spi_unregister_master(struct spi_master *master) } EXPORT_SYMBOL_GPL(spi_unregister_master); -static int __spi_master_match(struct device *dev, void *data) +int spi_master_suspend(struct spi_master *master) +{ + int ret; + + /* Basically no-ops for non-queued masters */ + if (!master->queued) + return 0; + + ret = spi_stop_queue(master); + if (ret) + dev_err(&master->dev, "queue stop failed\n"); + + return ret; +} +EXPORT_SYMBOL_GPL(spi_master_suspend); + +int spi_master_resume(struct spi_master *master) +{ + int ret; + + if (!master->queued) + return 0; + + ret = spi_start_queue(master); + if (ret) + dev_err(&master->dev, "queue restart failed\n"); + + return ret; +} +EXPORT_SYMBOL_GPL(spi_master_resume); + +static int __spi_master_match(struct device *dev, const void *data) { struct spi_master *m; - u16 *bus_num = data; + const u16 *bus_num = data; m = container_of(dev, struct spi_master, dev); return m->bus_num == *bus_num; @@ -726,7 +1314,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master); int spi_setup(struct spi_device *spi) { unsigned bad_bits; - int status; + int status = 0; /* help drivers fail *cleanly* when they need options * that aren't supported with their current master @@ -741,7 +1329,8 @@ int spi_setup(struct spi_device *spi) if (!spi->bits_per_word) spi->bits_per_word = 8; - status = spi->master->setup(spi); + if (spi->master->setup) + status = spi->master->setup(spi); dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s" "%u bits/w, %u Hz max --> %d\n", @@ -760,6 +1349,7 @@ EXPORT_SYMBOL_GPL(spi_setup); static int __spi_async(struct spi_device *spi, struct spi_message *message) { struct spi_master *master = spi->master; + struct spi_transfer *xfer; /* Half-duplex links include original MicroWire, and ones with * only one data pin like SPI_3WIRE (switches direction) or where @@ -768,7 +1358,6 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message) */ if ((master->flags & SPI_MASTER_HALF_DUPLEX) || (spi->mode & SPI_3WIRE)) { - struct spi_transfer *xfer; unsigned flags = master->flags; list_for_each_entry(xfer, &message->transfers, transfer_list) { @@ -781,6 +1370,25 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message) } } + /** + * Set transfer bits_per_word and max speed as spi device default if + * it is not set for this transfer. 
+	 */
+	list_for_each_entry(xfer, &message->transfers, transfer_list) {
+		if (!xfer->bits_per_word)
+			xfer->bits_per_word = spi->bits_per_word;
+		if (!xfer->speed_hz)
+			xfer->speed_hz = spi->max_speed_hz;
+		if (master->bits_per_word_mask) {
+			/* Only 32 bits fit in the mask */
+			if (xfer->bits_per_word > 32)
+				return -EINVAL;
+			if (!(master->bits_per_word_mask &
+					BIT(xfer->bits_per_word - 1)))
+				return -EINVAL;
+		}
+	}
+
 	message->spi = spi;
 	message->status = -EINPROGRESS;
 	return master->transfer(spi, message);
@@ -1023,7 +1631,7 @@ int spi_bus_unlock(struct spi_master *master)
 EXPORT_SYMBOL_GPL(spi_bus_unlock);

 /* portable code must never pass more than 32 bytes */
-#define	SPI_BUFSIZ	max(1028,SMP_CACHE_BYTES)
+#define	SPI_BUFSIZ	max(32,SMP_CACHE_BYTES)

 static u8	*buf;

@@ -1057,12 +1665,19 @@ int spi_write_then_read(struct spi_device *spi,
 	struct spi_transfer	x[2];
 	u8			*local_buf;

-	/* Use preallocated DMA-safe buffer.  We can't avoid copying here,
-	 * (as a pure convenience thing), but we can keep heap costs
-	 * out of the hot path ...
+	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
+	 * copying here, (as a pure convenience thing), but we can
+	 * keep heap costs out of the hot path unless someone else is
+	 * using the pre-allocated buffer or the transfer is too large.
 	 */
-	if ((n_tx + n_rx) > SPI_BUFSIZ)
-		return -EINVAL;
+	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
+		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
+				    GFP_KERNEL | GFP_DMA);
+		if (!local_buf)
+			return -ENOMEM;
+	} else {
+		local_buf = buf;
+	}

 	spi_message_init(&message);
 	memset(x, 0, sizeof x);
@@ -1075,15 +1690,6 @@ int spi_write_then_read(struct spi_device *spi,
 		spi_message_add_tail(&x[1], &message);
 	}

-	/* ... unless someone else is using the pre-allocated buffer */
-	if (!mutex_trylock(&lock)) {
-		local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
-		if (!local_buf)
-			return -ENOMEM;
-	} else
-		local_buf = buf;
-
-	memset(local_buf, 0, SPI_BUFSIZ);
 	memcpy(local_buf, txbuf, n_tx);
 	x[0].tx_buf = local_buf;
 	x[1].rx_buf = local_buf + n_tx;
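As a usage sketch (not part of the patch), a client driver calling the reworked helper might look like the following; the opcode and the mydev_ names are invented. Staying within the 32-byte guideline noted above keeps the call on the preallocated-buffer fast path, while larger or contended transfers now fall back to the kmalloc() path added here instead of failing with -EINVAL.

#include <linux/spi/spi.h>

/* Hypothetical client helper: read a one-byte ID register over SPI. */
static int mydev_read_id(struct spi_device *spi, u8 *id)
{
	u8 cmd = 0x9f;	/* invented "read ID" opcode */

	/* 1 byte out, 1 byte back: well under the 32-byte guideline */
	return spi_write_then_read(spi, &cmd, 1, id, 1);
}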