/* drivers/staging/iio/accel/lis3l02dq_ring.c */
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <linux/iio/iio.h>
#include "../ring_sw.h"
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include "lis3l02dq.h"

/**
 * combine_8_to_16() utility function to munge two u8s into a u16
 **/
static inline u16 combine_8_to_16(u8 lower, u8 upper)
{
        u16 _lower = lower;
        u16 _upper = upper;
        return _lower | (_upper << 8);
}
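/*
 * For example, combine_8_to_16(0x34, 0x12) returns 0x1234: the device
 * exposes each axis as a low/high output register pair (OUT_*_L, OUT_*_H),
 * with the low byte read first.
 */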

/**
 * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
 **/
irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
{
        struct iio_dev *indio_dev = private;
        struct lis3l02dq_state *st = iio_priv(indio_dev);

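        /*
         * If the data ready trigger is in use, hand the interrupt to the
         * trigger core; otherwise wake the threaded handler so the event
         * path (registered elsewhere in the driver) can deal with it.
         */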
        if (st->trigger_on) {
                iio_trigger_poll(st->trig, iio_get_time_ns());
                return IRQ_HANDLED;
        } else
                return IRQ_WAKE_THREAD;
}

static const u8 read_all_tx_array[] = {
        LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
        LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
        LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
        LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
        LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
        LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
};
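/*
 * Each axis contributes two two-byte transfers: a read command for the low
 * and then the high output register, each followed by a dummy 0 that clocks
 * the reply back in.
 */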

/**
 * lis3l02dq_read_all() Reads all channels currently selected
 * @indio_dev:  IIO device state
 * @rx_array:   (dma capable) receive array, must be at least
 *              4*number of channels
 **/
static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
{
        struct lis3l02dq_state *st = iio_priv(indio_dev);
        struct spi_transfer *xfers;
        struct spi_message msg;
        int ret, i, j = 0;

        xfers = kcalloc(bitmap_weight(indio_dev->active_scan_mask,
                                      indio_dev->masklength) * 2,
                        sizeof(*xfers), GFP_KERNEL);
        if (!xfers)
                return -ENOMEM;

        mutex_lock(&st->buf_lock);

        for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++)
                if (test_bit(i, indio_dev->active_scan_mask)) {
                        /* lower byte */
                        xfers[j].tx_buf = st->tx + 2*j;
                        st->tx[2*j] = read_all_tx_array[i*4];
                        st->tx[2*j + 1] = 0;
                        if (rx_array)
                                xfers[j].rx_buf = rx_array + j*2;
                        xfers[j].bits_per_word = 8;
                        xfers[j].len = 2;
                        xfers[j].cs_change = 1;
                        j++;

                        /* upper byte */
                        xfers[j].tx_buf = st->tx + 2*j;
                        st->tx[2*j] = read_all_tx_array[i*4 + 2];
                        st->tx[2*j + 1] = 0;
                        if (rx_array)
                                xfers[j].rx_buf = rx_array + j*2;
                        xfers[j].bits_per_word = 8;
                        xfers[j].len = 2;
                        xfers[j].cs_change = 1;
                        j++;
                }

        /* After these are transmitted, the rx_buff should have
         * values in alternate bytes
         */
        spi_message_init(&msg);
        for (j = 0; j < bitmap_weight(indio_dev->active_scan_mask,
                                      indio_dev->masklength) * 2; j++)
                spi_message_add_tail(&xfers[j], &msg);

        ret = spi_sync(st->us, &msg);
        mutex_unlock(&st->buf_lock);
        kfree(xfers);

        return ret;
}

static int lis3l02dq_get_buffer_element(struct iio_dev *indio_dev,
                                u8 *buf)
{
        int ret, i;
        u8 *rx_array;
        s16 *data = (s16 *)buf;
        int scan_count = bitmap_weight(indio_dev->active_scan_mask,
                                       indio_dev->masklength);

        rx_array = kzalloc(4 * scan_count, GFP_KERNEL);
        if (rx_array == NULL)
                return -ENOMEM;
        ret = lis3l02dq_read_all(indio_dev, rx_array);
        if (ret < 0) {
                kfree(rx_array);
                return ret;
        }
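        /*
         * Each enabled channel occupies 4 bytes of rx_array; the data bytes
         * sit at the odd offsets (1 and 3), the even offsets hold the bytes
         * clocked in while the read commands were being sent.
         */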
        for (i = 0; i < scan_count; i++)
                data[i] = combine_8_to_16(rx_array[i*4+1],
                                        rx_array[i*4+3]);
        kfree(rx_array);

        return i*sizeof(data[0]);
}

static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
{
        struct iio_poll_func *pf = p;
        struct iio_dev *indio_dev = pf->indio_dev;
        struct iio_buffer *buffer = indio_dev->buffer;
        int len = 0;
        char *data;

        data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
        if (data == NULL) {
                dev_err(indio_dev->dev.parent,
                        "memory alloc failed in buffer bh\n");
                iio_trigger_notify_done(indio_dev->trig);
                return IRQ_HANDLED;
        }

        if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
                len = lis3l02dq_get_buffer_element(indio_dev, data);

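        /*
         * The scan data occupies the first 'len' bytes of 'data'; the s64
         * timestamp is stored at the next 8-byte-aligned offset, which is
         * what the round-up expression below computes.
         */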
        /* Guaranteed to be aligned with 8 byte boundary */
        if (indio_dev->scan_timestamp)
                *(s64 *)(((phys_addr_t)data + len
                                + sizeof(s64) - 1) & ~(sizeof(s64) - 1))
                        = pf->timestamp;
        buffer->access->store_to(buffer, (u8 *)data, pf->timestamp);

        iio_trigger_notify_done(indio_dev->trig);
        kfree(data);
        return IRQ_HANDLED;
}

/* Caller responsible for locking as necessary. */
static int
__lis3l02dq_write_data_ready_config(struct iio_dev *indio_dev, bool state)
{
        int ret;
        u8 valold;
        bool currentlyset;
        struct lis3l02dq_state *st = iio_priv(indio_dev);

        /* Get the current event mask register */
        ret = lis3l02dq_spi_read_reg_8(indio_dev,
                                       LIS3L02DQ_REG_CTRL_2_ADDR,
                                       &valold);
        if (ret)
                goto error_ret;
        /* Find out if data ready is already on */
        currentlyset
                = valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

        /* Disable requested */
        if (!state && currentlyset) {
                /* disable the data ready signal */
                valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

                /* The double write is to overcome a hardware bug? */
                ret = lis3l02dq_spi_write_reg_8(indio_dev,
                                                LIS3L02DQ_REG_CTRL_2_ADDR,
                                                valold);
                if (ret)
                        goto error_ret;
                ret = lis3l02dq_spi_write_reg_8(indio_dev,
                                                LIS3L02DQ_REG_CTRL_2_ADDR,
                                                valold);
                if (ret)
                        goto error_ret;
                st->trigger_on = false;
        /* Enable requested */
        } else if (state && !currentlyset) {
                /* if not set, enable requested */
                /* first disable all events */
                ret = lis3l02dq_disable_all_events(indio_dev);
                if (ret < 0)
                        goto error_ret;

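                /*
                 * lis3l02dq_disable_all_events() is assumed to return the
                 * updated CTRL_2 value on success, so OR the data ready
                 * enable bit back into it.
                 */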
                valold = ret |
                        LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

                st->trigger_on = true;
                ret = lis3l02dq_spi_write_reg_8(indio_dev,
                                                LIS3L02DQ_REG_CTRL_2_ADDR,
                                                valold);
                if (ret)
                        goto error_ret;
        }

        return 0;
error_ret:
        return ret;
}

/**
 * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
 *
 * If disabling the interrupt, also do a final read to ensure it is clear.
 * This is only important in some cases where the scan enable elements are
 * switched before the buffer is re-enabled.
 **/
static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
                                                bool state)
{
        struct iio_dev *indio_dev = trig->private_data;
        int ret = 0;
        u8 t;

        __lis3l02dq_write_data_ready_config(indio_dev, state);
        if (!state) {
                /*
                 * A possible quirk with the handler is currently worked around
                 * by ensuring outstanding read events are cleared.
                 */
                ret = lis3l02dq_read_all(indio_dev, NULL);
        }
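        /*
         * Reading the wake up source register should clear any latched
         * event so the interrupt line is not left asserted.
         */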
        lis3l02dq_spi_read_reg_8(indio_dev,
                                 LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
                                 &t);
        return ret;
}

/**
 * lis3l02dq_trig_try_reen() try re-enabling the irq for the data rdy trigger
 * @trig:       the datardy trigger
 */
static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
{
        struct iio_dev *indio_dev = trig->private_data;
        struct lis3l02dq_state *st = iio_priv(indio_dev);
        int i;

        /* If gpio still high (or high again) */
        /* In theory possible we will need to do this several times */
        for (i = 0; i < 5; i++)
                if (gpio_get_value(irq_to_gpio(st->us->irq)))
                        lis3l02dq_read_all(indio_dev, NULL);
                else
                        break;
        if (i == 5)
                printk(KERN_INFO
                       "Failed to clear the interrupt for lis3l02dq\n");

        /* irq reenabled so success! */
        return 0;
}

static const struct iio_trigger_ops lis3l02dq_trigger_ops = {
        .owner = THIS_MODULE,
        .set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state,
        .try_reenable = &lis3l02dq_trig_try_reen,
};

int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
{
        int ret;
        struct lis3l02dq_state *st = iio_priv(indio_dev);

        st->trig = iio_trigger_alloc("lis3l02dq-dev%d", indio_dev->id);
        if (!st->trig) {
                ret = -ENOMEM;
                goto error_ret;
        }

        st->trig->dev.parent = &st->us->dev;
        st->trig->ops = &lis3l02dq_trigger_ops;
        st->trig->private_data = indio_dev;
        ret = iio_trigger_register(st->trig);
        if (ret)
                goto error_free_trig;

        return 0;

error_free_trig:
        iio_trigger_free(st->trig);
error_ret:
        return ret;
}

void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
{
        struct lis3l02dq_state *st = iio_priv(indio_dev);

        iio_trigger_unregister(st->trig);
        iio_trigger_free(st->trig);
}

void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev)
{
        iio_dealloc_pollfunc(indio_dev->pollfunc);
        lis3l02dq_free_buf(indio_dev->buffer);
}

static int lis3l02dq_buffer_postenable(struct iio_dev *indio_dev)
{
        /* Disable unwanted channels otherwise the interrupt will not clear */
        u8 t;
        int ret;
        bool oneenabled = false;

        ret = lis3l02dq_spi_read_reg_8(indio_dev,
                                       LIS3L02DQ_REG_CTRL_1_ADDR,
                                       &t);
        if (ret)
                goto error_ret;

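        /* Scan mask bits 0, 1 and 2 select the X, Y and Z channels */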
        if (test_bit(0, indio_dev->active_scan_mask)) {
                t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
                oneenabled = true;
        } else
                t &= ~LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
        if (test_bit(1, indio_dev->active_scan_mask)) {
                t |= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
                oneenabled = true;
        } else
                t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
        if (test_bit(2, indio_dev->active_scan_mask)) {
                t |= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
                oneenabled = true;
        } else
                t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;

        if (!oneenabled) /* what happens in this case is unknown */
                return -EINVAL;
        ret = lis3l02dq_spi_write_reg_8(indio_dev,
                                        LIS3L02DQ_REG_CTRL_1_ADDR,
                                        t);
        if (ret)
                goto error_ret;

        return iio_triggered_buffer_postenable(indio_dev);
error_ret:
        return ret;
}

/* Turn all channels on again */
static int lis3l02dq_buffer_predisable(struct iio_dev *indio_dev)
{
        u8 t;
        int ret;

        ret = iio_triggered_buffer_predisable(indio_dev);
        if (ret)
                goto error_ret;

        ret = lis3l02dq_spi_read_reg_8(indio_dev,
                                       LIS3L02DQ_REG_CTRL_1_ADDR,
                                       &t);
        if (ret)
                goto error_ret;
        t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE |
                LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE |
                LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;

        ret = lis3l02dq_spi_write_reg_8(indio_dev,
                                        LIS3L02DQ_REG_CTRL_1_ADDR,
                                        t);

error_ret:
        return ret;
}

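/*
 * preenable is the generic software buffer preenable; postenable and
 * predisable switch the unused axes off and back on around the trigger.
 */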
static const struct iio_buffer_setup_ops lis3l02dq_buffer_setup_ops = {
        .preenable = &iio_sw_buffer_preenable,
        .postenable = &lis3l02dq_buffer_postenable,
        .predisable = &lis3l02dq_buffer_predisable,
};

int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
{
        int ret;
        struct iio_buffer *buffer;

        buffer = lis3l02dq_alloc_buf(indio_dev);
        if (!buffer)
                return -ENOMEM;

        indio_dev->buffer = buffer;

        buffer->scan_timestamp = true;
        indio_dev->setup_ops = &lis3l02dq_buffer_setup_ops;

        /* Top half stores the timestamp; bottom half fills the buffer */
        indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
                                                 &lis3l02dq_trigger_handler,
                                                 0,
                                                 indio_dev,
                                                 "lis3l02dq_consumer%d",
                                                 indio_dev->id);

        if (indio_dev->pollfunc == NULL) {
                ret = -ENOMEM;
                goto error_iio_sw_rb_free;
        }

        indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
        return 0;

error_iio_sw_rb_free:
        lis3l02dq_free_buf(indio_dev->buffer);
        return ret;
}