Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 5d53185c2d996aec865558e55e68f46ec5bad724..49e25409de67b1e02be16d87154b8547579d6ca0 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -32,6 +32,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/acpi.h>
 #include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #define STATE_BOOTING          4
 #define STATE_LPM_ENABLED      5
 #define STATE_TX_ACTIVE                6
+#define STATE_SUSPENDED                7
+#define STATE_LPM_TRANSACTION  8
 
+#define HCI_LPM_WAKE_PKT 0xf0
 #define HCI_LPM_PKT 0xf1
 #define HCI_LPM_MAX_SIZE 10
 #define HCI_LPM_HDR_SIZE HCI_EVENT_HDR_SIZE
 
 #define LPM_OP_TX_NOTIFY 0x00
+#define LPM_OP_SUSPEND_ACK 0x02
+#define LPM_OP_RESUME_ACK 0x03
+
+#define LPM_SUSPEND_DELAY_MS 1000
 
 struct hci_lpm_pkt {
        __u8 opcode;
@@ -63,6 +71,8 @@ struct intel_device {
        struct list_head list;
        struct platform_device *pdev;
        struct gpio_desc *reset;
+       struct hci_uart *hu;
+       struct mutex hu_lock;
        int irq;
 };
 
@@ -72,6 +82,8 @@ static DEFINE_MUTEX(intel_device_list_lock);
 struct intel_data {
        struct sk_buff *rx_skb;
        struct sk_buff_head txq;
+       struct work_struct busy_work;
+       struct hci_uart *hu;
        unsigned long flags;
 };
 
@@ -129,12 +141,164 @@ static int intel_wait_booting(struct hci_uart *hu)
        return err;
 }
 
+#ifdef CONFIG_PM
+static int intel_wait_lpm_transaction(struct hci_uart *hu)
+{
+       struct intel_data *intel = hu->priv;
+       int err;
+
+       err = wait_on_bit_timeout(&intel->flags, STATE_LPM_TRANSACTION,
+                                 TASK_INTERRUPTIBLE,
+                                 msecs_to_jiffies(1000));
+
+       if (err == -EINTR) {
+               bt_dev_err(hu->hdev, "LPM transaction interrupted");
+               return -EINTR;
+       }
+
+       if (err) {
+               bt_dev_err(hu->hdev, "LPM transaction timeout");
+               return -ETIMEDOUT;
+       }
+
+       return err;
+}
+
+static int intel_lpm_suspend(struct hci_uart *hu)
+{
+       static const u8 suspend[] = { 0x01, 0x01, 0x01 };
+       struct intel_data *intel = hu->priv;
+       struct sk_buff *skb;
+
+       if (!test_bit(STATE_LPM_ENABLED, &intel->flags) ||
+           test_bit(STATE_SUSPENDED, &intel->flags))
+               return 0;
+
+       if (test_bit(STATE_TX_ACTIVE, &intel->flags))
+               return -EAGAIN;
+
+       bt_dev_dbg(hu->hdev, "Suspending");
+
+       skb = bt_skb_alloc(sizeof(suspend), GFP_KERNEL);
+       if (!skb) {
+               bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet");
+               return -ENOMEM;
+       }
+
+       memcpy(skb_put(skb, sizeof(suspend)), suspend, sizeof(suspend));
+       bt_cb(skb)->pkt_type = HCI_LPM_PKT;
+
+       set_bit(STATE_LPM_TRANSACTION, &intel->flags);
+
+       /* LPM flow is a priority, enqueue packet at list head */
+       skb_queue_head(&intel->txq, skb);
+       hci_uart_tx_wakeup(hu);
+
+       intel_wait_lpm_transaction(hu);
+       /* Even in case of failure, continue and test the suspended flag */
+
+       clear_bit(STATE_LPM_TRANSACTION, &intel->flags);
+
+       if (!test_bit(STATE_SUSPENDED, &intel->flags)) {
+               bt_dev_err(hu->hdev, "Device suspend error");
+               return -EINVAL;
+       }
+
+       bt_dev_dbg(hu->hdev, "Suspended");
+
+       hci_uart_set_flow_control(hu, true);
+
+       return 0;
+}
+
+static int intel_lpm_resume(struct hci_uart *hu)
+{
+       struct intel_data *intel = hu->priv;
+       struct sk_buff *skb;
+
+       if (!test_bit(STATE_LPM_ENABLED, &intel->flags) ||
+           !test_bit(STATE_SUSPENDED, &intel->flags))
+               return 0;
+
+       bt_dev_dbg(hu->hdev, "Resuming");
+
+       hci_uart_set_flow_control(hu, false);
+
+       skb = bt_skb_alloc(0, GFP_KERNEL);
+       if (!skb) {
+               bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet");
+               return -ENOMEM;
+       }
+
+       bt_cb(skb)->pkt_type = HCI_LPM_WAKE_PKT;
+
+       set_bit(STATE_LPM_TRANSACTION, &intel->flags);
+
+       /* LPM flow is a priority, enqueue packet at list head */
+       skb_queue_head(&intel->txq, skb);
+       hci_uart_tx_wakeup(hu);
+
+       intel_wait_lpm_transaction(hu);
+       /* Even in case of failure, continue and test the suspended flag */
+
+       clear_bit(STATE_LPM_TRANSACTION, &intel->flags);
+
+       if (test_bit(STATE_SUSPENDED, &intel->flags)) {
+               bt_dev_err(hu->hdev, "Device resume error");
+               return -EINVAL;
+       }
+
+       bt_dev_dbg(hu->hdev, "Resumed");
+
+       return 0;
+}
+#endif /* CONFIG_PM */
+
+static int intel_lpm_host_wake(struct hci_uart *hu)
+{
+       static const u8 lpm_resume_ack[] = { LPM_OP_RESUME_ACK, 0x00 };
+       struct intel_data *intel = hu->priv;
+       struct sk_buff *skb;
+
+       hci_uart_set_flow_control(hu, false);
+
+       clear_bit(STATE_SUSPENDED, &intel->flags);
+
+       skb = bt_skb_alloc(sizeof(lpm_resume_ack), GFP_KERNEL);
+       if (!skb) {
+               bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet");
+               return -ENOMEM;
+       }
+
+       memcpy(skb_put(skb, sizeof(lpm_resume_ack)), lpm_resume_ack,
+              sizeof(lpm_resume_ack));
+       bt_cb(skb)->pkt_type = HCI_LPM_PKT;
+
+       /* LPM flow is a priority, enqueue packet at list head */
+       skb_queue_head(&intel->txq, skb);
+       hci_uart_tx_wakeup(hu);
+
+       bt_dev_dbg(hu->hdev, "Resumed by controller");
+
+       return 0;
+}
+
 static irqreturn_t intel_irq(int irq, void *dev_id)
 {
        struct intel_device *idev = dev_id;
 
        dev_info(&idev->pdev->dev, "hci_intel irq\n");
 
+       mutex_lock(&idev->hu_lock);
+       if (idev->hu)
+               intel_lpm_host_wake(idev->hu);
+       mutex_unlock(&idev->hu_lock);
+
+       /* Host/Controller are now LPM resumed, trigger a new delayed suspend */
+       pm_runtime_get(&idev->pdev->dev);
+       pm_runtime_mark_last_busy(&idev->pdev->dev);
+       pm_runtime_put_autosuspend(&idev->pdev->dev);
+
        return IRQ_HANDLED;
 }
 
@@ -165,6 +329,15 @@ static int intel_set_power(struct hci_uart *hu, bool powered)
 
                gpiod_set_value(idev->reset, powered);
 
+       /* Provide idev with a hu reference that is used to run LPM
+        * transactions (LPM suspend/resume) from the PM callbacks.
+        * hu must be protected against concurrent removal while these
+        * PM ops run.
+        */
+               mutex_lock(&idev->hu_lock);
+               idev->hu = powered ? hu : NULL;
+               mutex_unlock(&idev->hu_lock);
+
                if (idev->irq < 0)
                        break;
 
@@ -181,9 +354,17 @@ static int intel_set_power(struct hci_uart *hu, bool powered)
                        }
 
                        device_wakeup_enable(&idev->pdev->dev);
+
+                       pm_runtime_set_active(&idev->pdev->dev);
+                       pm_runtime_use_autosuspend(&idev->pdev->dev);
+                       pm_runtime_set_autosuspend_delay(&idev->pdev->dev,
+                                                        LPM_SUSPEND_DELAY_MS);
+                       pm_runtime_enable(&idev->pdev->dev);
                } else if (!powered && device_may_wakeup(&idev->pdev->dev)) {
                        devm_free_irq(&idev->pdev->dev, idev->irq, idev);
                        device_wakeup_disable(&idev->pdev->dev);
+
+                       pm_runtime_disable(&idev->pdev->dev);
                }
        }
 
@@ -192,6 +373,28 @@ static int intel_set_power(struct hci_uart *hu, bool powered)
        return err;
 }
 
+static void intel_busy_work(struct work_struct *work)
+{
+       struct list_head *p;
+       struct intel_data *intel = container_of(work, struct intel_data,
+                                               busy_work);
+
+       /* Link is busy, delay the suspend */
+       mutex_lock(&intel_device_list_lock);
+       list_for_each(p, &intel_device_list) {
+               struct intel_device *idev = list_entry(p, struct intel_device,
+                                                      list);
+
+               if (intel->hu->tty->dev->parent == idev->pdev->dev.parent) {
+                       pm_runtime_get(&idev->pdev->dev);
+                       pm_runtime_mark_last_busy(&idev->pdev->dev);
+                       pm_runtime_put_autosuspend(&idev->pdev->dev);
+                       break;
+               }
+       }
+       mutex_unlock(&intel_device_list_lock);
+}
+
 static int intel_open(struct hci_uart *hu)
 {
        struct intel_data *intel;
@@ -203,6 +406,9 @@ static int intel_open(struct hci_uart *hu)
                return -ENOMEM;
 
        skb_queue_head_init(&intel->txq);
+       INIT_WORK(&intel->busy_work, intel_busy_work);
+
+       intel->hu = hu;
 
        hu->priv = intel;
 
@@ -218,6 +424,8 @@ static int intel_close(struct hci_uart *hu)
 
        BT_DBG("hu %p", hu);
 
+       cancel_work_sync(&intel->busy_work);
+
        intel_set_power(hu, false);
 
        skb_queue_purge(&intel->txq);
@@ -532,6 +740,10 @@ static int intel_setup(struct hci_uart *hu)
 
        bt_dev_info(hdev, "Found device firmware: %s", fwname);
 
+       /* Save the DDC file name for later */
+       snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.ddc",
+                le16_to_cpu(params->dev_revid));
+
        kfree_skb(skb);
 
        if (fw->size < 644) {
@@ -724,6 +936,9 @@ done:
        set_bit(STATE_LPM_ENABLED, &intel->flags);
 
 no_lpm:
+       /* Ignore errors, device can work without DDC parameters */
+       btintel_load_ddc_config(hdev, fwname);
+
        skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
@@ -791,20 +1006,41 @@ static void intel_recv_lpm_notify(struct hci_dev *hdev, int value)
 
        bt_dev_dbg(hdev, "TX idle notification (%d)", value);
 
-       if (value)
+       if (value) {
                set_bit(STATE_TX_ACTIVE, &intel->flags);
-       else
+               schedule_work(&intel->busy_work);
+       } else {
                clear_bit(STATE_TX_ACTIVE, &intel->flags);
+       }
 }
 
 static int intel_recv_lpm(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_lpm_pkt *lpm = (void *)skb->data;
+       struct hci_uart *hu = hci_get_drvdata(hdev);
+       struct intel_data *intel = hu->priv;
 
        switch (lpm->opcode) {
        case LPM_OP_TX_NOTIFY:
-               if (lpm->dlen)
-                       intel_recv_lpm_notify(hdev, lpm->data[0]);
+               if (lpm->dlen < 1) {
+                       bt_dev_err(hu->hdev, "Invalid LPM notification packet");
+                       break;
+               }
+               intel_recv_lpm_notify(hdev, lpm->data[0]);
+               break;
+       case LPM_OP_SUSPEND_ACK:
+               set_bit(STATE_SUSPENDED, &intel->flags);
+               if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags)) {
+                       smp_mb__after_atomic();
+                       wake_up_bit(&intel->flags, STATE_LPM_TRANSACTION);
+               }
+               break;
+       case LPM_OP_RESUME_ACK:
+               clear_bit(STATE_SUSPENDED, &intel->flags);
+               if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags)) {
+                       smp_mb__after_atomic();
+                       wake_up_bit(&intel->flags, STATE_LPM_TRANSACTION);
+               }
                break;
        default:
                bt_dev_err(hdev, "Unknown LPM opcode (%02x)", lpm->opcode);
@@ -853,9 +1089,27 @@ static int intel_recv(struct hci_uart *hu, const void *data, int count)
 static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
 {
        struct intel_data *intel = hu->priv;
+       struct list_head *p;
 
        BT_DBG("hu %p skb %p", hu, skb);
 
+       /* Make sure the controller is resumed and any pending LPM
+        * transaction has completed before enqueuing any packet.
+        */
+       mutex_lock(&intel_device_list_lock);
+       list_for_each(p, &intel_device_list) {
+               struct intel_device *idev = list_entry(p, struct intel_device,
+                                                      list);
+
+               if (hu->tty->dev->parent == idev->pdev->dev.parent) {
+                       pm_runtime_get_sync(&idev->pdev->dev);
+                       pm_runtime_mark_last_busy(&idev->pdev->dev);
+                       pm_runtime_put_autosuspend(&idev->pdev->dev);
+                       break;
+               }
+       }
+       mutex_unlock(&intel_device_list_lock);
+
        skb_queue_tail(&intel->txq, skb);
 
        return 0;
@@ -929,6 +1183,59 @@ static int intel_acpi_probe(struct intel_device *idev)
 }
 #endif
 
+#ifdef CONFIG_PM
+static int intel_suspend_device(struct device *dev)
+{
+       struct intel_device *idev = dev_get_drvdata(dev);
+
+       mutex_lock(&idev->hu_lock);
+       if (idev->hu)
+               intel_lpm_suspend(idev->hu);
+       mutex_unlock(&idev->hu_lock);
+
+       return 0;
+}
+
+static int intel_resume_device(struct device *dev)
+{
+       struct intel_device *idev = dev_get_drvdata(dev);
+
+       mutex_lock(&idev->hu_lock);
+       if (idev->hu)
+               intel_lpm_resume(idev->hu);
+       mutex_unlock(&idev->hu_lock);
+
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int intel_suspend(struct device *dev)
+{
+       struct intel_device *idev = dev_get_drvdata(dev);
+
+       if (device_may_wakeup(dev))
+               enable_irq_wake(idev->irq);
+
+       return intel_suspend_device(dev);
+}
+
+static int intel_resume(struct device *dev)
+{
+       struct intel_device *idev = dev_get_drvdata(dev);
+
+       if (device_may_wakeup(dev))
+               disable_irq_wake(idev->irq);
+
+       return intel_resume_device(dev);
+}
+#endif
+
+static const struct dev_pm_ops intel_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume)
+       SET_RUNTIME_PM_OPS(intel_suspend_device, intel_resume_device, NULL)
+};
+
 static int intel_probe(struct platform_device *pdev)
 {
        struct intel_device *idev;
@@ -937,6 +1244,8 @@ static int intel_probe(struct platform_device *pdev)
        if (!idev)
                return -ENOMEM;
 
+       mutex_init(&idev->hu_lock);
+
        idev->pdev = pdev;
 
        if (ACPI_HANDLE(&pdev->dev)) {
@@ -1013,6 +1322,7 @@ static struct platform_driver intel_driver = {
        .driver = {
                .name = "hci_intel",
                .acpi_match_table = ACPI_PTR(intel_acpi_match),
+               .pm = &intel_pm_ops,
        },
 };