drivers/misc/mei/interrupt.c
/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */


#include <linux/export.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/jiffies.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "hw-me.h"
#include "client.h"


/**
 * mei_cl_complete_handler - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
static void mei_cl_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb)
{
        if (cb->fop_type == MEI_FOP_WRITE) {
                mei_io_cb_free(cb);
                cb = NULL;
                cl->writing_state = MEI_WRITE_COMPLETE;
                if (waitqueue_active(&cl->tx_wait))
                        wake_up_interruptible(&cl->tx_wait);

        } else if (cb->fop_type == MEI_FOP_READ &&
                        MEI_READING == cl->reading_state) {
                cl->reading_state = MEI_READ_COMPLETE;
                if (waitqueue_active(&cl->rx_wait))
                        wake_up_interruptible(&cl->rx_wait);
                else
                        mei_cl_bus_rx_event(cl);

        }
}

/**
 * mei_irq_compl_handler - dispatch completion handlers
 *      for the completed callbacks
 *
 * @dev: mei device
 * @compl_list: list of completed cbs
 */
void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
{
        struct mei_cl_cb *cb, *next;
        struct mei_cl *cl;

        list_for_each_entry_safe(cb, next, &compl_list->list, list) {
                cl = cb->cl;
                list_del(&cb->list);
                if (!cl)
                        continue;

                dev_dbg(&dev->pdev->dev, "completing call back.\n");
                if (cl == &dev->iamthif_cl)
                        mei_amthif_complete(dev, cb);
                else
                        mei_cl_complete_handler(cl, cb);
        }
}
EXPORT_SYMBOL_GPL(mei_irq_compl_handler);

/**
 * mei_cl_hbm_equal - check if hbm is addressed to the client
 *
 * @cl: host client
 * @mei_hdr: header of mei client message
 *
 * returns true if matches, false otherwise
 */
static inline int mei_cl_hbm_equal(struct mei_cl *cl,
                        struct mei_msg_hdr *mei_hdr)
{
        return cl->host_client_id == mei_hdr->host_addr &&
                cl->me_client_id == mei_hdr->me_addr;
}

/**
 * mei_cl_is_reading - checks if the client
 *      is the one to read this message
 *
 * @cl: mei client
 * @mei_hdr: header of mei message
 *
 * returns true on match and false otherwise
 */
static bool mei_cl_is_reading(struct mei_cl *cl, struct mei_msg_hdr *mei_hdr)
{
        return mei_cl_hbm_equal(cl, mei_hdr) &&
                cl->state == MEI_FILE_CONNECTED &&
                cl->reading_state != MEI_READ_COMPLETE;
}

/**
 * mei_cl_irq_read_msg - process client message
 *
 * @dev: the device structure
 * @mei_hdr: header of mei client message
 * @complete_list: list of completed callbacks
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_cl_irq_read_msg(struct mei_device *dev,
                               struct mei_msg_hdr *mei_hdr,
                               struct mei_cl_cb *complete_list)
{
        struct mei_cl *cl;
        struct mei_cl_cb *cb, *next;
        unsigned char *buffer = NULL;

        list_for_each_entry_safe(cb, next, &dev->read_list.list, list) {
                cl = cb->cl;
                if (!cl || !mei_cl_is_reading(cl, mei_hdr))
                        continue;

                cl->reading_state = MEI_READING;

                if (cb->response_buffer.size == 0 ||
                    cb->response_buffer.data == NULL) {
                        dev_err(&dev->pdev->dev, "response buffer is not allocated.\n");
                        list_del(&cb->list);
                        return -ENOMEM;
                }

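                /* grow the response buffer if this fragment will not fit at the current offset */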
                if (cb->response_buffer.size < mei_hdr->length + cb->buf_idx) {
                        dev_dbg(&dev->pdev->dev, "message overflow. size %d len %d idx %ld\n",
                                cb->response_buffer.size,
                                mei_hdr->length, cb->buf_idx);
                        buffer = krealloc(cb->response_buffer.data,
                                          mei_hdr->length + cb->buf_idx,
                                          GFP_KERNEL);

                        if (!buffer) {
                                dev_err(&dev->pdev->dev, "allocation failed.\n");
                                list_del(&cb->list);
                                return -ENOMEM;
                        }
                        cb->response_buffer.data = buffer;
                        cb->response_buffer.size =
                                mei_hdr->length + cb->buf_idx;
                }

                buffer = cb->response_buffer.data + cb->buf_idx;
                mei_read_slots(dev, buffer, mei_hdr->length);

                cb->buf_idx += mei_hdr->length;
                if (mei_hdr->msg_complete) {
                        cl->status = 0;
                        list_del(&cb->list);
                        dev_dbg(&dev->pdev->dev, "completed read H cl = %d, ME cl = %d, length = %lu\n",
                                cl->host_client_id,
                                cl->me_client_id,
                                cb->buf_idx);
                        list_add_tail(&cb->list, &complete_list->list);
                }
                break;
        }

        dev_dbg(&dev->pdev->dev, "message read\n");
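        /* no waiting read cb claimed this message: drain it into the scratch buffer and discard it */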
        if (!buffer) {
                mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
                dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n",
                                MEI_HDR_PRM(mei_hdr));
        }

        return 0;
}

/**
 * _mei_irq_thread_close - processes close related operation.
 *
 * @dev: the device structure.
 * @slots: free slots.
 * @cb_pos: callback block.
 * @cl: private data of the file object.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
                                struct mei_cl_cb *cb_pos,
                                struct mei_cl *cl,
                                struct mei_cl_cb *cmpl_list)
{
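        /* mei_data2slots() converts a payload size into the number of 4-byte host-buffer slots needed, message header included */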
        u32 msg_slots =
                mei_data2slots(sizeof(struct hbm_client_connect_request));

        if (*slots < msg_slots)
                return -EMSGSIZE;

        *slots -= msg_slots;

        if (mei_hbm_cl_disconnect_req(dev, cl)) {
                cl->status = 0;
                cb_pos->buf_idx = 0;
                list_move_tail(&cb_pos->list, &cmpl_list->list);
                return -EIO;
        }

        cl->state = MEI_FILE_DISCONNECTING;
        cl->status = 0;
        cb_pos->buf_idx = 0;
        list_move_tail(&cb_pos->list, &dev->ctrl_rd_list.list);
        cl->timer_count = MEI_CONNECT_TIMEOUT;

        return 0;
}


/**
 * _mei_irq_thread_read - processes read related operation.
 *
 * @dev: the device structure.
 * @slots: free slots.
 * @cb_pos: callback block.
 * @cl: private data of the file object.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
                        struct mei_cl_cb *cb_pos,
                        struct mei_cl *cl,
                        struct mei_cl_cb *cmpl_list)
{
        u32 msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));

        if (*slots < msg_slots) {
                /* not enough room: drop the request and report the shortage */
                list_del(&cb_pos->list);
                return -EMSGSIZE;
        }

        *slots -= msg_slots;

        if (mei_hbm_cl_flow_control_req(dev, cl)) {
                cl->status = -ENODEV;
                cb_pos->buf_idx = 0;
                list_move_tail(&cb_pos->list, &cmpl_list->list);
                return -ENODEV;
        }
        list_move_tail(&cb_pos->list, &dev->read_list.list);

        return 0;
}


/**
 * _mei_irq_thread_ioctl - processes ioctl related operation.
 *
 * @dev: the device structure.
 * @slots: free slots.
 * @cb_pos: callback block.
 * @cl: private data of the file object.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
                        struct mei_cl_cb *cb_pos,
                        struct mei_cl *cl,
                        struct mei_cl_cb *cmpl_list)
{
        u32 msg_slots =
                mei_data2slots(sizeof(struct hbm_client_connect_request));

        if (*slots < msg_slots) {
                /* not enough room: drop the request and report the shortage */
                list_del(&cb_pos->list);
                return -EMSGSIZE;
        }

        *slots -= msg_slots;

        cl->state = MEI_FILE_CONNECTING;

        if (mei_hbm_cl_connect_req(dev, cl)) {
                cl->status = -ENODEV;
                cb_pos->buf_idx = 0;
                list_del(&cb_pos->list);
                return -ENODEV;
        } else {
                list_move_tail(&cb_pos->list, &dev->ctrl_rd_list.list);
                cl->timer_count = MEI_CONNECT_TIMEOUT;
        }
        return 0;
}

/**
 * mei_irq_thread_write_complete - write messages to device.
 *
 * @dev: the device structure.
 * @slots: free slots.
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
                        struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
{
        struct mei_msg_hdr mei_hdr;
        struct mei_cl *cl = cb->cl;
        size_t len = cb->request_buffer.size - cb->buf_idx;
        u32 msg_slots = mei_data2slots(len);

        mei_hdr.host_addr = cl->host_client_id;
        mei_hdr.me_addr = cl->me_client_id;
        mei_hdr.reserved = 0;

        if (*slots >= msg_slots) {
                mei_hdr.length = len;
                mei_hdr.msg_complete = 1;
        /* Split the message only if we can write the whole host buffer */
        } else if (*slots == dev->hbuf_depth) {
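                /*
                 * The buffer is completely empty but still too small for the
                 * remaining data: send a fragment that fills the whole buffer
                 * (payload = buffer size in bytes minus the message header).
                 */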
                msg_slots = *slots;
                len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
                mei_hdr.length = len;
                mei_hdr.msg_complete = 0;
        } else {
                /* wait for next time the host buffer is empty */
                return 0;
        }

        dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n",
                        cb->request_buffer.size, cb->buf_idx);
        dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));

        *slots -= msg_slots;
        if (mei_write_message(dev, &mei_hdr,
                        cb->request_buffer.data + cb->buf_idx)) {
                cl->status = -ENODEV;
                list_move_tail(&cb->list, &cmpl_list->list);
                return -ENODEV;
        }

        cl->status = 0;
        cb->buf_idx += mei_hdr.length;
        if (mei_hdr.msg_complete) {
                if (mei_cl_flow_ctrl_reduce(cl))
                        return -ENODEV;
                list_move_tail(&cb->list, &dev->write_waiting_list.list);
        }

        return 0;
}

/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 * handle the read processing.
 *
 * @dev: the device structure
 * @cmpl_list: list of completed callbacks
 * @slots: slots to read.
 *
 * returns 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
                struct mei_cl_cb *cmpl_list, s32 *slots)
{
        struct mei_msg_hdr *mei_hdr;
        struct mei_cl *cl_pos = NULL;
        struct mei_cl *cl_next = NULL;
        int ret = 0;

        if (!dev->rd_msg_hdr) {
                dev->rd_msg_hdr = mei_read_hdr(dev);
                dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
                (*slots)--;
                dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
        }
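        /* rd_msg_hdr caches the raw 32-bit header word read from the hardware; view it as a structured message header */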
        mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
        dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

        if (mei_hdr->reserved || !dev->rd_msg_hdr) {
                dev_dbg(&dev->pdev->dev, "corrupted message header.\n");
                ret = -EBADMSG;
                goto end;
        }

        if (mei_hdr->host_addr || mei_hdr->me_addr) {
                list_for_each_entry_safe(cl_pos, cl_next,
                                        &dev->file_list, link) {
                        dev_dbg(&dev->pdev->dev,
                                "list_for_each_entry_safe read host client = %d, ME client = %d\n",
                                cl_pos->host_client_id,
                                cl_pos->me_client_id);
                        if (mei_cl_hbm_equal(cl_pos, mei_hdr))
                                break;
                }

                if (&cl_pos->link == &dev->file_list) {
                        dev_dbg(&dev->pdev->dev, "corrupted message header\n");
                        ret = -EBADMSG;
                        goto end;
                }
        }
        if (((*slots) * sizeof(u32)) < mei_hdr->length) {
                dev_err(&dev->pdev->dev,
                                "we can't read the message slots =%08x.\n",
                                *slots);
                /* we can't read the message */
                ret = -ERANGE;
                goto end;
        }

        /* decide where to read the message to */
        if (!mei_hdr->host_addr) {
                dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_bus_message.\n");
                mei_hbm_dispatch(dev, mei_hdr);
                dev_dbg(&dev->pdev->dev, "end mei_irq_thread_read_bus_message.\n");
        } else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
                   (MEI_FILE_CONNECTED == dev->iamthif_cl.state) &&
                   (dev->iamthif_state == MEI_IAMTHIF_READING)) {

                dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n");
                dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

                ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list);
                if (ret)
                        goto end;
        } else {
                dev_dbg(&dev->pdev->dev, "call mei_cl_irq_read_msg.\n");
                dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
                ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list);
                if (ret)
                        goto end;
        }

        /* reset the number of slots and header */
        *slots = mei_count_full_read_slots(dev);
        dev->rd_msg_hdr = 0;

        if (*slots == -EOVERFLOW) {
                /* overflow - reset */
                dev_err(&dev->pdev->dev, "resetting due to slots overflow.\n");
                /* set the event since message has been read */
                ret = -ERANGE;
                goto end;
        }
end:
        return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);


/**
 * mei_irq_write_handler - dispatch write requests
 *  after irq received
 *
 * @dev: the device structure
 * @cmpl_list: list of completed callbacks
 *
 * returns 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
{

        struct mei_cl *cl;
        struct mei_cl_cb *pos = NULL, *next = NULL;
        struct mei_cl_cb *list;
        s32 slots;
        int ret;

        if (!mei_hbuf_is_ready(dev)) {
                dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");
                return 0;
        }
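        /* free 32-bit slots currently available in the host (write) buffer */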
        slots = mei_hbuf_empty_slots(dev);
        if (slots <= 0)
                return -EMSGSIZE;

        /* complete all waiting for write CB */
        dev_dbg(&dev->pdev->dev, "complete all waiting for write cb.\n");

        list = &dev->write_waiting_list;
        list_for_each_entry_safe(pos, next, &list->list, list) {
                cl = pos->cl;
                if (cl == NULL)
                        continue;

                cl->status = 0;
                list_del(&pos->list);
                if (MEI_WRITING == cl->writing_state &&
                    pos->fop_type == MEI_FOP_WRITE &&
                    cl != &dev->iamthif_cl) {
                        dev_dbg(&dev->pdev->dev, "MEI WRITE COMPLETE\n");
                        cl->writing_state = MEI_WRITE_COMPLETE;
                        list_add_tail(&pos->list, &cmpl_list->list);
                }
                if (cl == &dev->iamthif_cl) {
                        dev_dbg(&dev->pdev->dev, "check iamthif flow control.\n");
                        if (dev->iamthif_flow_control_pending) {
                                ret = mei_amthif_irq_read(dev, &slots);
                                if (ret)
                                        return ret;
                        }
                }
        }

        if (dev->wd_state == MEI_WD_STOPPING) {
                dev->wd_state = MEI_WD_IDLE;
                wake_up_interruptible(&dev->wait_stop_wd);
        }

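        /* a single pending message may be staged in wr_ext_msg; send it now that the host buffer has room */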
        if (dev->wr_ext_msg.hdr.length) {
                mei_write_message(dev, &dev->wr_ext_msg.hdr,
                                dev->wr_ext_msg.data);
                slots -= mei_data2slots(dev->wr_ext_msg.hdr.length);
                dev->wr_ext_msg.hdr.length = 0;
        }
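        /* send a pending watchdog start/stop message if the wd client has flow control credits */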
        if (dev->dev_state == MEI_DEV_ENABLED) {
                if (dev->wd_pending &&
                    mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
                        if (mei_wd_send(dev))
                                dev_dbg(&dev->pdev->dev, "wd send failed.\n");
                        else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl))
                                return -ENODEV;

                        dev->wd_pending = false;

                        if (dev->wd_state == MEI_WD_RUNNING)
                                slots -= mei_data2slots(MEI_WD_START_MSG_SIZE);
                        else
                                slots -= mei_data2slots(MEI_WD_STOP_MSG_SIZE);
                }
        }

        /* complete control write list CB */
        dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
        list_for_each_entry_safe(pos, next, &dev->ctrl_wr_list.list, list) {
                cl = pos->cl;
                if (!cl) {
                        list_del(&pos->list);
                        return -ENODEV;
                }
                switch (pos->fop_type) {
                case MEI_FOP_CLOSE:
                        /* send disconnect message */
                        ret = _mei_irq_thread_close(dev, &slots, pos,
                                                cl, cmpl_list);
                        if (ret)
                                return ret;

                        break;
                case MEI_FOP_READ:
                        /* send flow control message */
                        ret = _mei_irq_thread_read(dev, &slots, pos,
                                                cl, cmpl_list);
                        if (ret)
                                return ret;

                        break;
                case MEI_FOP_IOCTL:
                        /* connect message */
                        if (mei_cl_is_other_connecting(cl))
                                continue;
                        ret = _mei_irq_thread_ioctl(dev, &slots, pos,
                                                cl, cmpl_list);
                        if (ret)
                                return ret;

                        break;

                default:
                        BUG();
                }

        }
        /* complete write list CB */
        dev_dbg(&dev->pdev->dev, "complete write list cb.\n");
        list_for_each_entry_safe(pos, next, &dev->write_list.list, list) {
                cl = pos->cl;
                if (cl == NULL)
                        continue;
                if (mei_cl_flow_ctrl_creds(cl) <= 0) {
                        dev_dbg(&dev->pdev->dev,
                                "No flow control credentials for client %d, not sending.\n",
                                cl->host_client_id);
                        continue;
                }

                if (cl == &dev->iamthif_cl)
                        ret = mei_amthif_irq_write_complete(dev, &slots,
                                                        pos, cmpl_list);
                else
                        ret = mei_irq_thread_write_complete(dev, &slots, pos,
                                                cmpl_list);
                if (ret)
                        return ret;

        }
        return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);



/**
 * mei_timer - timer function.
 *
 * @work: pointer to the work_struct structure
 *
 * NOTE: This function is called by timer interrupt work
 */
void mei_timer(struct work_struct *work)
{
        unsigned long timeout;
        struct mei_cl *cl_pos = NULL;
        struct mei_cl *cl_next = NULL;
        struct mei_cl_cb *cb_pos = NULL;
        struct mei_cl_cb *cb_next = NULL;

        struct mei_device *dev = container_of(work,
                                        struct mei_device, timer_work.work);


        mutex_lock(&dev->device_lock);
        if (dev->dev_state != MEI_DEV_ENABLED) {
                if (dev->dev_state == MEI_DEV_INIT_CLIENTS) {
                        if (dev->init_clients_timer) {
                                if (--dev->init_clients_timer == 0) {
                                        dev_err(&dev->pdev->dev, "reset: init clients timeout hbm_state = %d.\n",
                                                dev->hbm_state);
                                        mei_reset(dev, 1);
                                }
                        }
                }
                goto out;
        }
        /*** connect/disconnect timeouts ***/
        list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
                if (cl_pos->timer_count) {
                        if (--cl_pos->timer_count == 0) {
                                dev_err(&dev->pdev->dev, "reset: connect/disconnect timeout.\n");
                                mei_reset(dev, 1);
                                goto out;
                        }
                }
        }

        if (dev->iamthif_stall_timer) {
                if (--dev->iamthif_stall_timer == 0) {
                        dev_err(&dev->pdev->dev, "reset: amthif hung.\n");
                        mei_reset(dev, 1);
                        dev->iamthif_msg_buf_size = 0;
                        dev->iamthif_msg_buf_index = 0;
                        dev->iamthif_canceled = false;
                        dev->iamthif_ioctl = true;
                        dev->iamthif_state = MEI_IAMTHIF_IDLE;
                        dev->iamthif_timer = 0;

                        mei_io_cb_free(dev->iamthif_current_cb);
                        dev->iamthif_current_cb = NULL;

                        dev->iamthif_file_object = NULL;
                        mei_amthif_run_next_cmd(dev);
                }
        }

        if (dev->iamthif_timer) {

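                /* iamthif_timer marks (in jiffies) when AMTHIF data became available to the reader; allow MEI_IAMTHIF_READ_TIMER seconds for it to be collected */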
                timeout = dev->iamthif_timer +
                        mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

                dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
                                dev->iamthif_timer);
                dev_dbg(&dev->pdev->dev, "timeout = %ld\n", timeout);
                dev_dbg(&dev->pdev->dev, "jiffies = %ld\n", jiffies);
                if (time_after(jiffies, timeout)) {
                        /*
                         * User didn't read the AMTHI data on time (15sec)
                         * freeing AMTHI for other requests
                         */

                        dev_dbg(&dev->pdev->dev, "freeing AMTHI for other requests\n");

                        list_for_each_entry_safe(cb_pos, cb_next,
                                &dev->amthif_rd_complete_list.list, list) {

                                cl_pos = cb_pos->file_object->private_data;

                                /* Finding the AMTHI entry. */
                                if (cl_pos == &dev->iamthif_cl)
                                        list_del(&cb_pos->list);
                        }
                        mei_io_cb_free(dev->iamthif_current_cb);
                        dev->iamthif_current_cb = NULL;

                        dev->iamthif_file_object->private_data = NULL;
                        dev->iamthif_file_object = NULL;
                        dev->iamthif_timer = 0;
                        mei_amthif_run_next_cmd(dev);

                }
        }
out:
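        /* re-arm the periodic timer work; it runs again in roughly two seconds */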
        schedule_delayed_work(&dev->timer_work, 2 * HZ);
        mutex_unlock(&dev->device_lock);
}