IB/ehca: Add driver for IBM eHCA InfiniBand adapters
author Heiko J Schick <schickhj.ibm.com>
Fri, 22 Sep 2006 22:22:22 +0000 (15:22 -0700)
committer Roland Dreier <rolandd@cisco.com>
Fri, 22 Sep 2006 22:22:22 +0000 (15:22 -0700)
Add a driver for IBM GX bus InfiniBand adapters, which are usable with
some pSeries/System p systems.

Signed-off-by: Heiko J Schick <schickhj.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
34 files changed:
MAINTAINERS
drivers/infiniband/Kconfig
drivers/infiniband/Makefile
drivers/infiniband/hw/ehca/Kconfig [new file with mode: 0644]
drivers/infiniband/hw/ehca/Makefile [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_av.c [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_classes.h [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_classes_pSeries.h [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_cq.c [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_eq.c [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_hca.c [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_irq.c [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_irq.h [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_iverbs.h [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_main.c [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_mcast.c [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_mrmw.c [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_mrmw.h [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_pd.c [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_qes.h [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_qp.c [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_reqs.c [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_sqp.c [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_tools.h [new file with mode: 0644]
drivers/infiniband/hw/ehca/ehca_uverbs.c [new file with mode: 0644]
drivers/infiniband/hw/ehca/hcp_if.c [new file with mode: 0644]
drivers/infiniband/hw/ehca/hcp_if.h [new file with mode: 0644]
drivers/infiniband/hw/ehca/hcp_phyp.c [new file with mode: 0644]
drivers/infiniband/hw/ehca/hcp_phyp.h [new file with mode: 0644]
drivers/infiniband/hw/ehca/hipz_fns.h [new file with mode: 0644]
drivers/infiniband/hw/ehca/hipz_fns_core.h [new file with mode: 0644]
drivers/infiniband/hw/ehca/hipz_hw.h [new file with mode: 0644]
drivers/infiniband/hw/ehca/ipz_pt_fn.c [new file with mode: 0644]
drivers/infiniband/hw/ehca/ipz_pt_fn.h [new file with mode: 0644]

index ed2a83cfad7c05de65acf1c3cbcbe46df7f2dd57..830bec779d4791ec8e969d55d7efc2fc357c9562 100644
@@ -991,6 +991,14 @@ EFS FILESYSTEM
 W:     http://aeschi.ch.eu.org/efs/
 S:     Orphan
 
+EHCA (IBM GX bus InfiniBand adapter) DRIVER
+P:     Hoang-Nam Nguyen
+M:     hnguyen@de.ibm.com
+P:     Christoph Raisch
+M:     raisch@de.ibm.com
+L:     openib-general@openib.org
+S:     Supported
+
 EMU10K1 SOUND DRIVER
 P:     James Courtier-Dutton
 M:     James@superbug.demon.co.uk
index 69a53d476b5b7ee2cb30dd88518ffb11cd73bf70..fd2d528daa3aac069bbf126f0f4860c669ec9e08 100644
@@ -36,6 +36,7 @@ config INFINIBAND_ADDR_TRANS
 
 source "drivers/infiniband/hw/mthca/Kconfig"
 source "drivers/infiniband/hw/ipath/Kconfig"
+source "drivers/infiniband/hw/ehca/Kconfig"
 
 source "drivers/infiniband/ulp/ipoib/Kconfig"
 
index c7ff58c1d0e5e9a38b0baf87de600e24a40afc3b..893bee0a50b572946149ab46fb65cdb8cee1b06a 100644
@@ -1,6 +1,7 @@
 obj-$(CONFIG_INFINIBAND)               += core/
 obj-$(CONFIG_INFINIBAND_MTHCA)         += hw/mthca/
 obj-$(CONFIG_IPATH_CORE)               += hw/ipath/
+obj-$(CONFIG_INFINIBAND_EHCA)          += hw/ehca/
 obj-$(CONFIG_INFINIBAND_IPOIB)         += ulp/ipoib/
 obj-$(CONFIG_INFINIBAND_SRP)           += ulp/srp/
 obj-$(CONFIG_INFINIBAND_ISER)          += ulp/iser/
diff --git a/drivers/infiniband/hw/ehca/Kconfig b/drivers/infiniband/hw/ehca/Kconfig
new file mode 100644
index 0000000..922389b
--- /dev/null
@@ -0,0 +1,16 @@
+config INFINIBAND_EHCA
+       tristate "eHCA support"
+       depends on IBMEBUS && INFINIBAND
+       ---help---
+       This driver supports the IBM pSeries eHCA InfiniBand adapter.
+
+       To compile the driver as a module, choose M here. The module
+       will be called ib_ehca.
+
+config INFINIBAND_EHCA_SCALING
+       bool "Scaling support (EXPERIMENTAL)"
+       depends on IBMEBUS && INFINIBAND_EHCA && HOTPLUG_CPU && EXPERIMENTAL
+       ---help---
+       eHCA scaling support schedules the CQ callbacks to different CPUs.
+
+       To enable this feature choose Y here.
diff --git a/drivers/infiniband/hw/ehca/Makefile b/drivers/infiniband/hw/ehca/Makefile
new file mode 100644
index 0000000..74d284e
--- /dev/null
@@ -0,0 +1,16 @@
+#  Authors: Heiko J Schick <schickhj@de.ibm.com>
+#           Christoph Raisch <raisch@de.ibm.com>
+#           Joachim Fenkes <fenkes@de.ibm.com>
+#
+#  Copyright (c) 2005 IBM Corporation
+#
+#  All rights reserved.
+#
+#  This source code is distributed under a dual license of GPL v2.0 and OpenIB BSD.
+
+obj-$(CONFIG_INFINIBAND_EHCA) += ib_ehca.o
+
+ib_ehca-objs  = ehca_main.o ehca_hca.o ehca_mcast.o ehca_pd.o ehca_av.o ehca_eq.o \
+               ehca_cq.o ehca_qp.o ehca_sqp.o ehca_mrmw.o ehca_reqs.o ehca_irq.o \
+               ehca_uverbs.o ipz_pt_fn.o hcp_if.o hcp_phyp.o
+
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
new file mode 100644
index 0000000..3bac197
--- /dev/null
@@ -0,0 +1,271 @@
+/*
+ *  IBM eServer eHCA InfiniBand device driver for Linux on POWER
+ *
+ *  address vector functions
+ *
+ *  Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *           Khadija Souissi <souissik@de.ibm.com>
+ *           Reinhard Ernst <rernst@de.ibm.com>
+ *           Christoph Raisch <raisch@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <asm/current.h>
+
+#include "ehca_tools.h"
+#include "ehca_iverbs.h"
+#include "hcp_if.h"
+
+static struct kmem_cache *av_cache;
+
+struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+{
+       int ret;
+       struct ehca_av *av;
+       struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
+                                             ib_device);
+
+       av = kmem_cache_alloc(av_cache, SLAB_KERNEL);
+       if (!av) {
+               ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
+                        pd, ah_attr);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       av->av.sl = ah_attr->sl;
+       av->av.dlid = ah_attr->dlid;
+       av->av.slid_path_bits = ah_attr->src_path_bits;
+
+       if (ehca_static_rate < 0) {
+               int ah_mult = ib_rate_to_mult(ah_attr->static_rate);
+               int ehca_mult =
+                       ib_rate_to_mult(shca->sport[ah_attr->port_num].rate);
+
+               if (ah_mult >= ehca_mult)
+                       av->av.ipd = 0;
+               else
+                       av->av.ipd = (ah_mult > 0) ?
+                               ((ehca_mult - 1) / ah_mult) : 0;
+       } else
+               av->av.ipd = ehca_static_rate;
+
+       av->av.lnh = ah_attr->ah_flags;
+       av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
+       av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_TCLASS_MASK,
+                                           ah_attr->grh.traffic_class);
+       av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
+                                           ah_attr->grh.flow_label);
+       av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
+                                           ah_attr->grh.hop_limit);
+       av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1B);
+       /* set sgid in grh.word_1 */
+       if (ah_attr->ah_flags & IB_AH_GRH) {
+               int rc;
+               struct ib_port_attr port_attr;
+               union ib_gid gid;
+               memset(&port_attr, 0, sizeof(port_attr));
+               rc = ehca_query_port(pd->device, ah_attr->port_num,
+                                    &port_attr);
+               if (rc) { /* invalid port number */
+                       ret = -EINVAL;
+                       ehca_err(pd->device, "Invalid port number "
+                                "ehca_query_port() returned %x "
+                                "pd=%p ah_attr=%p", rc, pd, ah_attr);
+                       goto create_ah_exit1;
+               }
+               memset(&gid, 0, sizeof(gid));
+               rc = ehca_query_gid(pd->device,
+                                   ah_attr->port_num,
+                                   ah_attr->grh.sgid_index, &gid);
+               if (rc) {
+                       ret = -EINVAL;
+                       ehca_err(pd->device, "Failed to retrieve sgid "
+                                "ehca_query_gid() returned %x "
+                                "pd=%p ah_attr=%p", rc, pd, ah_attr);
+                       goto create_ah_exit1;
+               }
+               memcpy(&av->av.grh.word_1, &gid, sizeof(gid));
+       }
+       /* for the time being we use a hard coded PMTU of 2048 Bytes */
+       av->av.pmtu = 4;
+
+       /* dgid comes in grh.word_3 */
+       memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid,
+              sizeof(ah_attr->grh.dgid));
+
+       return &av->ib_ah;
+
+create_ah_exit1:
+       kmem_cache_free(av_cache, av);
+
+       return ERR_PTR(ret);
+}
+
+int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
+{
+       struct ehca_av *av;
+       struct ehca_ud_av new_ehca_av;
+       struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
+       u32 cur_pid = current->tgid;
+
+       if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+           my_pd->ownpid != cur_pid) {
+               ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
+                        cur_pid, my_pd->ownpid);
+               return -EINVAL;
+       }
+
+       memset(&new_ehca_av, 0, sizeof(new_ehca_av));
+       new_ehca_av.sl = ah_attr->sl;
+       new_ehca_av.dlid = ah_attr->dlid;
+       new_ehca_av.slid_path_bits = ah_attr->src_path_bits;
+       new_ehca_av.ipd = ah_attr->static_rate;
+       new_ehca_av.lnh = EHCA_BMASK_SET(GRH_FLAG_MASK,
+                                        (ah_attr->ah_flags & IB_AH_GRH) > 0);
+       new_ehca_av.grh.word_0 = EHCA_BMASK_SET(GRH_TCLASS_MASK,
+                                               ah_attr->grh.traffic_class);
+       new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
+                                                ah_attr->grh.flow_label);
+       new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
+                                                ah_attr->grh.hop_limit);
+       new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1b);
+
+       /* set sgid in grh.word_1 */
+       if (ah_attr->ah_flags & IB_AH_GRH) {
+               int rc;
+               struct ib_port_attr port_attr;
+               union ib_gid gid;
+               memset(&port_attr, 0, sizeof(port_attr));
+               rc = ehca_query_port(ah->device, ah_attr->port_num,
+                                    &port_attr);
+               if (rc) { /* invalid port number */
+                       ehca_err(ah->device, "Invalid port number "
+                                "ehca_query_port() returned %x "
+                                "ah=%p ah_attr=%p port_num=%x",
+                                rc, ah, ah_attr, ah_attr->port_num);
+                       return -EINVAL;
+               }
+               memset(&gid, 0, sizeof(gid));
+               rc = ehca_query_gid(ah->device,
+                                   ah_attr->port_num,
+                                   ah_attr->grh.sgid_index, &gid);
+               if (rc) {
+                       ehca_err(ah->device, "Failed to retrieve sgid "
+                                "ehca_query_gid() returned %x "
+                                "ah=%p ah_attr=%p port_num=%x "
+                                "sgid_index=%x",
+                                rc, ah, ah_attr, ah_attr->port_num,
+                                ah_attr->grh.sgid_index);
+                       return -EINVAL;
+               }
+               memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid));
+       }
+
+       new_ehca_av.pmtu = 4; /* see also comment in create_ah() */
+
+       memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid,
+              sizeof(ah_attr->grh.dgid));
+
+       av = container_of(ah, struct ehca_av, ib_ah);
+       av->av = new_ehca_av;
+
+       return 0;
+}
+
+int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
+{
+       struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
+       struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
+       u32 cur_pid = current->tgid;
+
+       if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+           my_pd->ownpid != cur_pid) {
+               ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
+                        cur_pid, my_pd->ownpid);
+               return -EINVAL;
+       }
+
+       memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
+              sizeof(ah_attr->grh.dgid));
+       ah_attr->sl = av->av.sl;
+
+       ah_attr->dlid = av->av.dlid;
+
+       ah_attr->src_path_bits = av->av.slid_path_bits;
+       ah_attr->static_rate = av->av.ipd;
+       ah_attr->ah_flags = EHCA_BMASK_GET(GRH_FLAG_MASK, av->av.lnh);
+       ah_attr->grh.traffic_class = EHCA_BMASK_GET(GRH_TCLASS_MASK,
+                                                   av->av.grh.word_0);
+       ah_attr->grh.hop_limit = EHCA_BMASK_GET(GRH_HOPLIMIT_MASK,
+                                               av->av.grh.word_0);
+       ah_attr->grh.flow_label = EHCA_BMASK_GET(GRH_FLOWLABEL_MASK,
+                                                av->av.grh.word_0);
+
+       return 0;
+}
+
+int ehca_destroy_ah(struct ib_ah *ah)
+{
+       struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
+       u32 cur_pid = current->tgid;
+
+       if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+           my_pd->ownpid != cur_pid) {
+               ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
+                        cur_pid, my_pd->ownpid);
+               return -EINVAL;
+       }
+
+       kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));
+
+       return 0;
+}
+
+int ehca_init_av_cache(void)
+{
+       av_cache = kmem_cache_create("ehca_cache_av",
+                                  sizeof(struct ehca_av), 0,
+                                  SLAB_HWCACHE_ALIGN,
+                                  NULL, NULL);
+       if (!av_cache)
+               return -ENOMEM;
+       return 0;
+}
+
+void ehca_cleanup_av_cache(void)
+{
+       if (av_cache)
+               kmem_cache_destroy(av_cache);
+}
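
The inter-packet delay (IPD) computation in ehca_create_ah() above scales the
delay by the ratio of the port's link rate to the requested static rate.  As a
rough stand-alone model of that arithmetic (not driver code): the sample rates
below are made up, and rate_to_mult() only mimics what the IB core's
ib_rate_to_mult() returns, i.e. the rate as a multiple of 2.5 Gbps.

#include <stdio.h>

static int rate_to_mult(int gbps_times_10)
{
	return gbps_times_10 / 25;	/* rate as a multiple of 2.5 Gbps */
}

static int calc_ipd(int ah_mult, int ehca_mult)
{
	if (ah_mult >= ehca_mult)
		return 0;		/* requested rate >= port rate: no delay */
	return ah_mult > 0 ? (ehca_mult - 1) / ah_mult : 0;
}

int main(void)
{
	int ah_mult   = rate_to_mult(100);	/* peer at 10 Gbps -> mult 4 */
	int ehca_mult = rate_to_mult(200);	/* port at 20 Gbps -> mult 8 */

	printf("ipd=%d\n", calc_ipd(ah_mult, ehca_mult));	/* prints ipd=1 */
	return 0;
}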
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
new file mode 100644
index 0000000..1c72203
--- /dev/null
@@ -0,0 +1,346 @@
+/*
+ *  IBM eServer eHCA InfiniBand device driver for Linux on POWER
+ *
+ *  Struct definitions for eHCA internal structures
+ *
+ *  Authors: Heiko J Schick <schickhj@de.ibm.com>
+ *           Christoph Raisch <raisch@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __EHCA_CLASSES_H__
+#define __EHCA_CLASSES_H__
+
+#include "ehca_classes.h"
+#include "ipz_pt_fn.h"
+
+struct ehca_module;
+struct ehca_qp;
+struct ehca_cq;
+struct ehca_eq;
+struct ehca_mr;
+struct ehca_mw;
+struct ehca_pd;
+struct ehca_av;
+
+#ifdef CONFIG_PPC64
+#include "ehca_classes_pSeries.h"
+#endif
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "ehca_irq.h"
+
+struct ehca_eq {
+       u32 length;
+       struct ipz_queue ipz_queue;
+       struct ipz_eq_handle ipz_eq_handle;
+       struct work_struct work;
+       struct h_galpas galpas;
+       int is_initialized;
+       struct ehca_pfeq pf;
+       spinlock_t spinlock;
+       struct tasklet_struct interrupt_task;
+       u32 ist;
+};
+
+struct ehca_sport {
+       struct ib_cq *ibcq_aqp1;
+       struct ib_qp *ibqp_aqp1;
+       enum ib_rate  rate;
+       enum ib_port_state port_state;
+};
+
+struct ehca_shca {
+       struct ib_device ib_device;
+       struct ibmebus_dev *ibmebus_dev;
+       u8 num_ports;
+       int hw_level;
+       struct list_head shca_list;
+       struct ipz_adapter_handle ipz_hca_handle;
+       struct ehca_sport sport[2];
+       struct ehca_eq eq;
+       struct ehca_eq neq;
+       struct ehca_mr *maxmr;
+       struct ehca_pd *pd;
+       struct h_galpas galpas;
+};
+
+struct ehca_pd {
+       struct ib_pd ib_pd;
+       struct ipz_pd fw_pd;
+       u32 ownpid;
+};
+
+struct ehca_qp {
+       struct ib_qp ib_qp;
+       u32 qp_type;
+       struct ipz_queue ipz_squeue;
+       struct ipz_queue ipz_rqueue;
+       struct h_galpas galpas;
+       u32 qkey;
+       u32 real_qp_num;
+       u32 token;
+       spinlock_t spinlock_s;
+       spinlock_t spinlock_r;
+       u32 sq_max_inline_data_size;
+       struct ipz_qp_handle ipz_qp_handle;
+       struct ehca_pfqp pf;
+       struct ib_qp_init_attr init_attr;
+       u64 uspace_squeue;
+       u64 uspace_rqueue;
+       u64 uspace_fwh;
+       struct ehca_cq *send_cq;
+       struct ehca_cq *recv_cq;
+       unsigned int sqerr_purgeflag;
+       struct hlist_node list_entries;
+};
+
+/* must be power of 2 */
+#define QP_HASHTAB_LEN 8
+
+struct ehca_cq {
+       struct ib_cq ib_cq;
+       struct ipz_queue ipz_queue;
+       struct h_galpas galpas;
+       spinlock_t spinlock;
+       u32 cq_number;
+       u32 token;
+       u32 nr_of_entries;
+       struct ipz_cq_handle ipz_cq_handle;
+       struct ehca_pfcq pf;
+       spinlock_t cb_lock;
+       u64 uspace_queue;
+       u64 uspace_fwh;
+       struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
+       struct list_head entry;
+       u32 nr_callbacks;
+       spinlock_t task_lock;
+       u32 ownpid;
+};
+
+enum ehca_mr_flag {
+       EHCA_MR_FLAG_FMR = 0x80000000,   /* FMR, created with ehca_alloc_fmr */
+       EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR                           */
+};
+
+struct ehca_mr {
+       union {
+               struct ib_mr ib_mr;     /* must always be first in ehca_mr */
+               struct ib_fmr ib_fmr;   /* must always be first in ehca_mr */
+       } ib;
+       spinlock_t mrlock;
+
+       enum ehca_mr_flag flags;
+       u32 num_pages;          /* number of MR pages */
+       u32 num_4k;             /* number of 4k "page" portions to form MR */
+       int acl;                /* ACL (stored here for usage in reregister) */
+       u64 *start;             /* virtual start address (stored here for */
+                               /* usage in reregister) */
+       u64 size;               /* size (stored here for usage in reregister) */
+       u32 fmr_page_size;      /* page size for FMR */
+       u32 fmr_max_pages;      /* max pages for FMR */
+       u32 fmr_max_maps;       /* max outstanding maps for FMR */
+       u32 fmr_map_cnt;        /* map counter for FMR */
+       /* fw specific data */
+       struct ipz_mrmw_handle ipz_mr_handle;   /* MR handle for h-calls */
+       struct h_galpas galpas;
+       /* data for userspace bridge */
+       u32 nr_of_pages;
+       void *pagearray;
+};
+
+struct ehca_mw {
+       struct ib_mw ib_mw;     /* gen2 mw, must always be first in ehca_mw */
+       spinlock_t mwlock;
+
+       u8 never_bound;         /* indication MW was never bound */
+       struct ipz_mrmw_handle ipz_mw_handle;   /* MW handle for h-calls */
+       struct h_galpas galpas;
+};
+
+enum ehca_mr_pgi_type {
+       EHCA_MR_PGI_PHYS   = 1,  /* type of ehca_reg_phys_mr,
+                                 * ehca_rereg_phys_mr,
+                                 * ehca_reg_internal_maxmr */
+       EHCA_MR_PGI_USER   = 2,  /* type of ehca_reg_user_mr */
+       EHCA_MR_PGI_FMR    = 3   /* type of ehca_map_phys_fmr */
+};
+
+struct ehca_mr_pginfo {
+       enum ehca_mr_pgi_type type;
+       u64 num_pages;
+       u64 page_cnt;
+       u64 num_4k;       /* number of 4k "page" portions */
+       u64 page_4k_cnt;  /* counter for 4k "page" portions */
+       u64 next_4k;      /* next 4k "page" portion in buffer/chunk/listelem */
+
+       /* type EHCA_MR_PGI_PHYS section */
+       int num_phys_buf;
+       struct ib_phys_buf *phys_buf_array;
+       u64 next_buf;
+
+       /* type EHCA_MR_PGI_USER section */
+       struct ib_umem *region;
+       struct ib_umem_chunk *next_chunk;
+       u64 next_nmap;
+
+       /* type EHCA_MR_PGI_FMR section */
+       u64 *page_list;
+       u64 next_listelem;
+       /* next_4k also used within EHCA_MR_PGI_FMR */
+};
+
+/* output parameters for MR/FMR hipz calls */
+struct ehca_mr_hipzout_parms {
+       struct ipz_mrmw_handle handle;
+       u32 lkey;
+       u32 rkey;
+       u64 len;
+       u64 vaddr;
+       u32 acl;
+};
+
+/* output parameters for MW hipz calls */
+struct ehca_mw_hipzout_parms {
+       struct ipz_mrmw_handle handle;
+       u32 rkey;
+};
+
+struct ehca_av {
+       struct ib_ah ib_ah;
+       struct ehca_ud_av av;
+};
+
+struct ehca_ucontext {
+       struct ib_ucontext ib_ucontext;
+};
+
+struct ehca_module *ehca_module_new(void);
+
+int ehca_module_delete(struct ehca_module *me);
+
+int ehca_eq_ctor(struct ehca_eq *eq);
+
+int ehca_eq_dtor(struct ehca_eq *eq);
+
+struct ehca_shca *ehca_shca_new(void);
+
+int ehca_shca_delete(struct ehca_shca *me);
+
+struct ehca_sport *ehca_sport_new(struct ehca_shca *anchor);
+
+int ehca_init_pd_cache(void);
+void ehca_cleanup_pd_cache(void);
+int ehca_init_cq_cache(void);
+void ehca_cleanup_cq_cache(void);
+int ehca_init_qp_cache(void);
+void ehca_cleanup_qp_cache(void);
+int ehca_init_av_cache(void);
+void ehca_cleanup_av_cache(void);
+int ehca_init_mrmw_cache(void);
+void ehca_cleanup_mrmw_cache(void);
+
+extern spinlock_t ehca_qp_idr_lock;
+extern spinlock_t ehca_cq_idr_lock;
+extern struct idr ehca_qp_idr;
+extern struct idr ehca_cq_idr;
+
+extern int ehca_static_rate;
+extern int ehca_port_act_time;
+extern int ehca_use_hp_mr;
+
+struct ipzu_queue_resp {
+       u64 queue;        /* points to first queue entry */
+       u32 qe_size;      /* queue entry size */
+       u32 act_nr_of_sg;
+       u32 queue_length; /* queue length allocated in bytes */
+       u32 pagesize;
+       u32 toggle_state;
+       u32 dummy; /* padding for 8 byte alignment */
+};
+
+struct ehca_create_cq_resp {
+       u32 cq_number;
+       u32 token;
+       struct ipzu_queue_resp ipz_queue;
+       struct h_galpas galpas;
+};
+
+struct ehca_create_qp_resp {
+       u32 qp_num;
+       u32 token;
+       u32 qp_type;
+       u32 qkey;
+       /* qp_num assigned by ehca: sqp0/1 may have got different numbers */
+       u32 real_qp_num;
+       u32 dummy; /* padding for 8 byte alignment */
+       struct ipzu_queue_resp ipz_squeue;
+       struct ipzu_queue_resp ipz_rqueue;
+       struct h_galpas galpas;
+};
+
+struct ehca_alloc_cq_parms {
+       u32 nr_cqe;
+       u32 act_nr_of_entries;
+       u32 act_pages;
+       struct ipz_eq_handle eq_handle;
+};
+
+struct ehca_alloc_qp_parms {
+       int servicetype;
+       int sigtype;
+       int daqp_ctrl;
+       int max_send_sge;
+       int max_recv_sge;
+       int ud_av_l_key_ctl;
+
+       u16 act_nr_send_wqes;
+       u16 act_nr_recv_wqes;
+       u8  act_nr_recv_sges;
+       u8  act_nr_send_sges;
+
+       u32 nr_rq_pages;
+       u32 nr_sq_pages;
+
+       struct ipz_eq_handle ipz_eq_handle;
+       struct ipz_pd pd;
+};
+
+int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
+int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
+struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
+
+#endif
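
All of these wrappers embed their ib_verbs counterpart (struct ib_pd in
struct ehca_pd, struct ib_cq in struct ehca_cq, and so on), and the driver
recovers the outer object with container_of(), as seen in ehca_modify_ah()
and ehca_destroy_ah() above.  A minimal user-space model of that idiom, with
a stand-in ib_pd rather than the real rdma/ib_verbs.h type:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_pd { int dummy; };		/* stand-in for the verbs struct */

struct ehca_pd {
	struct ib_pd ib_pd;		/* embedded verbs object, first member */
	unsigned int ownpid;
};

int main(void)
{
	struct ehca_pd my_pd = { .ownpid = 42 };
	struct ib_pd *pd = &my_pd.ib_pd;	/* what the verbs core hands out */

	struct ehca_pd *outer = container_of(pd, struct ehca_pd, ib_pd);
	printf("ownpid=%u\n", outer->ownpid);	/* prints ownpid=42 */
	return 0;
}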
diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
new file mode 100644
index 0000000..5665f21
--- /dev/null
@@ -0,0 +1,236 @@
+/*
+ *  IBM eServer eHCA InfiniBand device driver for Linux on POWER
+ *
+ *  pSeries interface definitions
+ *
+ *  Authors: Waleri Fomin <fomin@de.ibm.com>
+ *           Christoph Raisch <raisch@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __EHCA_CLASSES_PSERIES_H__
+#define __EHCA_CLASSES_PSERIES_H__
+
+#include "hcp_phyp.h"
+#include "ipz_pt_fn.h"
+
+
+struct ehca_pfqp {
+       struct ipz_qpt sqpt;
+       struct ipz_qpt rqpt;
+};
+
+struct ehca_pfcq {
+       struct ipz_qpt qpt;
+       u32 cqnr;
+};
+
+struct ehca_pfeq {
+       struct ipz_qpt qpt;
+       struct h_galpa galpa;
+       u32 eqnr;
+};
+
+struct ipz_adapter_handle {
+       u64 handle;
+};
+
+struct ipz_cq_handle {
+       u64 handle;
+};
+
+struct ipz_eq_handle {
+       u64 handle;
+};
+
+struct ipz_qp_handle {
+       u64 handle;
+};
+struct ipz_mrmw_handle {
+       u64 handle;
+};
+
+struct ipz_pd {
+       u32 value;
+};
+
+struct hcp_modify_qp_control_block {
+       u32 qkey;                      /* 00 */
+       u32 rdd;                       /* reliable datagram domain */
+       u32 send_psn;                  /* 02 */
+       u32 receive_psn;               /* 03 */
+       u32 prim_phys_port;            /* 04 */
+       u32 alt_phys_port;             /* 05 */
+       u32 prim_p_key_idx;            /* 06 */
+       u32 alt_p_key_idx;             /* 07 */
+       u32 rdma_atomic_ctrl;          /* 08 */
+       u32 qp_state;                  /* 09 */
+       u32 reserved_10;               /* 10 */
+       u32 rdma_nr_atomic_resp_res;   /* 11 */
+       u32 path_migration_state;      /* 12 */
+       u32 rdma_atomic_outst_dest_qp; /* 13 */
+       u32 dest_qp_nr;                /* 14 */
+       u32 min_rnr_nak_timer_field;   /* 15 */
+       u32 service_level;             /* 16 */
+       u32 send_grh_flag;             /* 17 */
+       u32 retry_count;               /* 18 */
+       u32 timeout;                   /* 19 */
+       u32 path_mtu;                  /* 20 */
+       u32 max_static_rate;           /* 21 */
+       u32 dlid;                      /* 22 */
+       u32 rnr_retry_count;           /* 23 */
+       u32 source_path_bits;          /* 24 */
+       u32 traffic_class;             /* 25 */
+       u32 hop_limit;                 /* 26 */
+       u32 source_gid_idx;            /* 27 */
+       u32 flow_label;                /* 28 */
+       u32 reserved_29;               /* 29 */
+       union {                        /* 30 */
+               u64 dw[2];
+               u8 byte[16];
+       } dest_gid;
+       u32 service_level_al;          /* 34 */
+       u32 send_grh_flag_al;          /* 35 */
+       u32 retry_count_al;            /* 36 */
+       u32 timeout_al;                /* 37 */
+       u32 max_static_rate_al;        /* 38 */
+       u32 dlid_al;                   /* 39 */
+       u32 rnr_retry_count_al;        /* 40 */
+       u32 source_path_bits_al;       /* 41 */
+       u32 traffic_class_al;          /* 42 */
+       u32 hop_limit_al;              /* 43 */
+       u32 source_gid_idx_al;         /* 44 */
+       u32 flow_label_al;             /* 45 */
+       u32 reserved_46;               /* 46 */
+       u32 reserved_47;               /* 47 */
+       union {                        /* 48 */
+               u64 dw[2];
+               u8 byte[16];
+       } dest_gid_al;
+       u32 max_nr_outst_send_wr;      /* 52 */
+       u32 max_nr_outst_recv_wr;      /* 53 */
+       u32 disable_ete_credit_check;  /* 54 */
+       u32 qp_number;                 /* 55 */
+       u64 send_queue_handle;         /* 56 */
+       u64 recv_queue_handle;         /* 58 */
+       u32 actual_nr_sges_in_sq_wqe;  /* 60 */
+       u32 actual_nr_sges_in_rq_wqe;  /* 61 */
+       u32 qp_enable;                 /* 62 */
+       u32 curr_srq_limit;            /* 63 */
+       u64 qp_aff_asyn_ev_log_reg;    /* 64 */
+       u64 shared_rq_hndl;            /* 66 */
+       u64 trigg_doorbell_qp_hndl;    /* 68 */
+       u32 reserved_70_127[58];       /* 70 */
+};
+
+#define MQPCB_MASK_QKEY                         EHCA_BMASK_IBM(0,0)
+#define MQPCB_MASK_SEND_PSN                     EHCA_BMASK_IBM(2,2)
+#define MQPCB_MASK_RECEIVE_PSN                  EHCA_BMASK_IBM(3,3)
+#define MQPCB_MASK_PRIM_PHYS_PORT               EHCA_BMASK_IBM(4,4)
+#define MQPCB_PRIM_PHYS_PORT                    EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_ALT_PHYS_PORT                EHCA_BMASK_IBM(5,5)
+#define MQPCB_MASK_PRIM_P_KEY_IDX               EHCA_BMASK_IBM(6,6)
+#define MQPCB_PRIM_P_KEY_IDX                    EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_ALT_P_KEY_IDX                EHCA_BMASK_IBM(7,7)
+#define MQPCB_MASK_RDMA_ATOMIC_CTRL             EHCA_BMASK_IBM(8,8)
+#define MQPCB_MASK_QP_STATE                     EHCA_BMASK_IBM(9,9)
+#define MQPCB_QP_STATE                          EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES      EHCA_BMASK_IBM(11,11)
+#define MQPCB_MASK_PATH_MIGRATION_STATE         EHCA_BMASK_IBM(12,12)
+#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP    EHCA_BMASK_IBM(13,13)
+#define MQPCB_MASK_DEST_QP_NR                   EHCA_BMASK_IBM(14,14)
+#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD      EHCA_BMASK_IBM(15,15)
+#define MQPCB_MASK_SERVICE_LEVEL                EHCA_BMASK_IBM(16,16)
+#define MQPCB_MASK_SEND_GRH_FLAG                EHCA_BMASK_IBM(17,17)
+#define MQPCB_MASK_RETRY_COUNT                  EHCA_BMASK_IBM(18,18)
+#define MQPCB_MASK_TIMEOUT                      EHCA_BMASK_IBM(19,19)
+#define MQPCB_MASK_PATH_MTU                     EHCA_BMASK_IBM(20,20)
+#define MQPCB_PATH_MTU                          EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_MAX_STATIC_RATE              EHCA_BMASK_IBM(21,21)
+#define MQPCB_MAX_STATIC_RATE                   EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_DLID                         EHCA_BMASK_IBM(22,22)
+#define MQPCB_DLID                              EHCA_BMASK_IBM(16,31)
+#define MQPCB_MASK_RNR_RETRY_COUNT              EHCA_BMASK_IBM(23,23)
+#define MQPCB_RNR_RETRY_COUNT                   EHCA_BMASK_IBM(29,31)
+#define MQPCB_MASK_SOURCE_PATH_BITS             EHCA_BMASK_IBM(24,24)
+#define MQPCB_SOURCE_PATH_BITS                  EHCA_BMASK_IBM(25,31)
+#define MQPCB_MASK_TRAFFIC_CLASS                EHCA_BMASK_IBM(25,25)
+#define MQPCB_TRAFFIC_CLASS                     EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_HOP_LIMIT                    EHCA_BMASK_IBM(26,26)
+#define MQPCB_HOP_LIMIT                         EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_SOURCE_GID_IDX               EHCA_BMASK_IBM(27,27)
+#define MQPCB_SOURCE_GID_IDX                    EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_FLOW_LABEL                   EHCA_BMASK_IBM(28,28)
+#define MQPCB_FLOW_LABEL                        EHCA_BMASK_IBM(12,31)
+#define MQPCB_MASK_DEST_GID                     EHCA_BMASK_IBM(30,30)
+#define MQPCB_MASK_SERVICE_LEVEL_AL             EHCA_BMASK_IBM(31,31)
+#define MQPCB_SERVICE_LEVEL_AL                  EHCA_BMASK_IBM(28,31)
+#define MQPCB_MASK_SEND_GRH_FLAG_AL             EHCA_BMASK_IBM(32,32)
+#define MQPCB_SEND_GRH_FLAG_AL                  EHCA_BMASK_IBM(31,31)
+#define MQPCB_MASK_RETRY_COUNT_AL               EHCA_BMASK_IBM(33,33)
+#define MQPCB_RETRY_COUNT_AL                    EHCA_BMASK_IBM(29,31)
+#define MQPCB_MASK_TIMEOUT_AL                   EHCA_BMASK_IBM(34,34)
+#define MQPCB_TIMEOUT_AL                        EHCA_BMASK_IBM(27,31)
+#define MQPCB_MASK_MAX_STATIC_RATE_AL           EHCA_BMASK_IBM(35,35)
+#define MQPCB_MAX_STATIC_RATE_AL                EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_DLID_AL                      EHCA_BMASK_IBM(36,36)
+#define MQPCB_DLID_AL                           EHCA_BMASK_IBM(16,31)
+#define MQPCB_MASK_RNR_RETRY_COUNT_AL           EHCA_BMASK_IBM(37,37)
+#define MQPCB_RNR_RETRY_COUNT_AL                EHCA_BMASK_IBM(29,31)
+#define MQPCB_MASK_SOURCE_PATH_BITS_AL          EHCA_BMASK_IBM(38,38)
+#define MQPCB_SOURCE_PATH_BITS_AL               EHCA_BMASK_IBM(25,31)
+#define MQPCB_MASK_TRAFFIC_CLASS_AL             EHCA_BMASK_IBM(39,39)
+#define MQPCB_TRAFFIC_CLASS_AL                  EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_HOP_LIMIT_AL                 EHCA_BMASK_IBM(40,40)
+#define MQPCB_HOP_LIMIT_AL                      EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_SOURCE_GID_IDX_AL            EHCA_BMASK_IBM(41,41)
+#define MQPCB_SOURCE_GID_IDX_AL                 EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_FLOW_LABEL_AL                EHCA_BMASK_IBM(42,42)
+#define MQPCB_FLOW_LABEL_AL                     EHCA_BMASK_IBM(12,31)
+#define MQPCB_MASK_DEST_GID_AL                  EHCA_BMASK_IBM(44,44)
+#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR         EHCA_BMASK_IBM(45,45)
+#define MQPCB_MAX_NR_OUTST_SEND_WR              EHCA_BMASK_IBM(16,31)
+#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR         EHCA_BMASK_IBM(46,46)
+#define MQPCB_MAX_NR_OUTST_RECV_WR              EHCA_BMASK_IBM(16,31)
+#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK     EHCA_BMASK_IBM(47,47)
+#define MQPCB_DISABLE_ETE_CREDIT_CHECK          EHCA_BMASK_IBM(31,31)
+#define MQPCB_QP_NUMBER                         EHCA_BMASK_IBM(8,31)
+#define MQPCB_MASK_QP_ENABLE                    EHCA_BMASK_IBM(48,48)
+#define MQPCB_QP_ENABLE                         EHCA_BMASK_IBM(31,31)
+#define MQPCB_MASK_CURR_SQR_LIMIT               EHCA_BMASK_IBM(49,49)
+#define MQPCB_CURR_SQR_LIMIT                    EHCA_BMASK_IBM(15,31)
+#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG       EHCA_BMASK_IBM(50,50)
+#define MQPCB_MASK_SHARED_RQ_HNDL               EHCA_BMASK_IBM(51,51)
+
+#endif /* __EHCA_CLASSES_PSERIES_H__ */
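
The MQPCB_* constants above are built with EHCA_BMASK_IBM(), which is defined
in ehca_tools.h and is not part of this excerpt.  Assuming it describes a bit
field in IBM numbering (bit 0 is the most significant bit of the word), a
hypothetical user-space model of the set/get scheme could look like the sketch
below; the macro encoding here is illustrative only, not the driver's actual
implementation.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* pack {width, shift} of a field spanning IBM bits from..to of a u64 */
#define BMASK_IBM(from, to)	((((to) - (from) + 1) << 8) | (63 - (to)))
#define BMASK_WIDTH(m)		((m) >> 8)
#define BMASK_SHIFT(m)		((m) & 0xff)
#define BMASK_SET(m, v) \
	(((u64)(v) & ((1ULL << BMASK_WIDTH(m)) - 1)) << BMASK_SHIFT(m))
#define BMASK_GET(m, r) \
	(((r) >> BMASK_SHIFT(m)) & ((1ULL << BMASK_WIDTH(m)) - 1))

int main(void)
{
	/* illustrative field: the 4-bit IP version at IBM bits 0..3 */
	enum { IPVERSION_MASK = BMASK_IBM(0, 3) };
	u64 word0 = 0;

	word0 |= BMASK_SET(IPVERSION_MASK, 6);	/* as in ehca_create_ah() */
	printf("ipversion=%llu\n",
	       (unsigned long long)BMASK_GET(IPVERSION_MASK, word0));
	return 0;
}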
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
new file mode 100644
index 0000000..458fe19
--- /dev/null
@@ -0,0 +1,427 @@
+/*
+ *  IBM eServer eHCA InfiniBand device driver for Linux on POWER
+ *
+ *  Completion queue handling
+ *
+ *  Authors: Waleri Fomin <fomin@de.ibm.com>
+ *           Khadija Souissi <souissi@de.ibm.com>
+ *           Reinhard Ernst <rernst@de.ibm.com>
+ *           Heiko J Schick <schickhj@de.ibm.com>
+ *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/current.h>
+
+#include "ehca_iverbs.h"
+#include "ehca_classes.h"
+#include "ehca_irq.h"
+#include "hcp_if.h"
+
+static struct kmem_cache *cq_cache;
+
+int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
+{
+       unsigned int qp_num = qp->real_qp_num;
+       unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
+       unsigned long spl_flags;
+
+       spin_lock_irqsave(&cq->spinlock, spl_flags);
+       hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
+       spin_unlock_irqrestore(&cq->spinlock, spl_flags);
+
+       ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
+                cq->cq_number, qp_num);
+
+       return 0;
+}
+
+int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
+{
+       int ret = -EINVAL;
+       unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
+       struct hlist_node *iter;
+       struct ehca_qp *qp;
+       unsigned long spl_flags;
+
+       spin_lock_irqsave(&cq->spinlock, spl_flags);
+       hlist_for_each(iter, &cq->qp_hashtab[key]) {
+               qp = hlist_entry(iter, struct ehca_qp, list_entries);
+               if (qp->real_qp_num == real_qp_num) {
+                       hlist_del(iter);
+                       ehca_dbg(cq->ib_cq.device,
+                                "removed qp from cq. cq_num=%x real_qp_num=%x",
+                                cq->cq_number, real_qp_num);
+                       ret = 0;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&cq->spinlock, spl_flags);
+       if (ret)
+               ehca_err(cq->ib_cq.device,
+                        "qp not found cq_num=%x real_qp_num=%x",
+                        cq->cq_number, real_qp_num);
+
+       return ret;
+}
+
+struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
+{
+       struct ehca_qp *ret = NULL;
+       unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
+       struct hlist_node *iter;
+       struct ehca_qp *qp;
+       hlist_for_each(iter, &cq->qp_hashtab[key]) {
+               qp = hlist_entry(iter, struct ehca_qp, list_entries);
+               if (qp->real_qp_num == real_qp_num) {
+                       ret = qp;
+                       break;
+               }
+       }
+       return ret;
+}
+
+struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
+                            struct ib_ucontext *context,
+                            struct ib_udata *udata)
+{
+       static const u32 additional_cqe = 20;
+       struct ib_cq *cq;
+       struct ehca_cq *my_cq;
+       struct ehca_shca *shca =
+               container_of(device, struct ehca_shca, ib_device);
+       struct ipz_adapter_handle adapter_handle;
+       struct ehca_alloc_cq_parms param; /* h_call's out parameters */
+       struct h_galpa gal;
+       void *vpage;
+       u32 counter;
+       u64 rpage, cqx_fec, h_ret;
+       int ipz_rc, ret, i;
+       unsigned long flags;
+
+       if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
+               return ERR_PTR(-EINVAL);
+
+       my_cq = kmem_cache_alloc(cq_cache, SLAB_KERNEL);
+       if (!my_cq) {
+               ehca_err(device, "Out of memory for ehca_cq struct device=%p",
+                        device);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       memset(my_cq, 0, sizeof(struct ehca_cq));
+       memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));
+
+       spin_lock_init(&my_cq->spinlock);
+       spin_lock_init(&my_cq->cb_lock);
+       spin_lock_init(&my_cq->task_lock);
+       my_cq->ownpid = current->tgid;
+
+       cq = &my_cq->ib_cq;
+
+       adapter_handle = shca->ipz_hca_handle;
+       param.eq_handle = shca->eq.ipz_eq_handle;
+
+       do {
+               if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) {
+                       cq = ERR_PTR(-ENOMEM);
+                       ehca_err(device, "Can't reserve idr nr. device=%p",
+                                device);
+                       goto create_cq_exit1;
+               }
+
+               spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+               ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
+               spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+       } while (ret == -EAGAIN);
+
+       if (ret) {
+               cq = ERR_PTR(-ENOMEM);
+               ehca_err(device, "Can't allocate new idr entry. device=%p",
+                        device);
+               goto create_cq_exit1;
+       }
+
+       /*
+        * CQs have a maximum depth of 4GB-64, but we need an additional 20
+        * entries as a buffer for receiving error CQEs.
+        */
+       param.nr_cqe = cqe + additional_cqe;
+       h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param);
+
+       if (h_ret != H_SUCCESS) {
+               ehca_err(device, "hipz_h_alloc_resource_cq() failed "
+                        "h_ret=%lx device=%p", h_ret, device);
+               cq = ERR_PTR(ehca2ib_return_code(h_ret));
+               goto create_cq_exit2;
+       }
+
+       ipz_rc = ipz_queue_ctor(&my_cq->ipz_queue, param.act_pages,
+                               EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0);
+       if (!ipz_rc) {
+               ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%x device=%p",
+                        ipz_rc, device);
+               cq = ERR_PTR(-EINVAL);
+               goto create_cq_exit3;
+       }
+
+       for (counter = 0; counter < param.act_pages; counter++) {
+               vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
+               if (!vpage) {
+                       ehca_err(device, "ipz_qpageit_get_inc() "
+                                "returns NULL device=%p", device);
+                       cq = ERR_PTR(-EAGAIN);
+                       goto create_cq_exit4;
+               }
+               rpage = virt_to_abs(vpage);
+
+               h_ret = hipz_h_register_rpage_cq(adapter_handle,
+                                                my_cq->ipz_cq_handle,
+                                                &my_cq->pf,
+                                                0,
+                                                0,
+                                                rpage,
+                                                1,
+                                                my_cq->galpas.
+                                                kernel);
+
+               if (h_ret < H_SUCCESS) {
+                       ehca_err(device, "hipz_h_register_rpage_cq() failed "
+                                "ehca_cq=%p cq_num=%x h_ret=%lx counter=%i "
+                                "act_pages=%i", my_cq, my_cq->cq_number,
+                                h_ret, counter, param.act_pages);
+                       cq = ERR_PTR(-EINVAL);
+                       goto create_cq_exit4;
+               }
+
+               if (counter == (param.act_pages - 1)) {
+                       vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
+                       if ((h_ret != H_SUCCESS) || vpage) {
+                               ehca_err(device, "Registration of pages not "
+                                        "complete ehca_cq=%p cq_num=%x "
+                                        "h_ret=%lx", my_cq, my_cq->cq_number,
+                                        h_ret);
+                               cq = ERR_PTR(-EAGAIN);
+                               goto create_cq_exit4;
+                       }
+               } else {
+                       if (h_ret != H_PAGE_REGISTERED) {
+                               ehca_err(device, "Registration of page failed "
+                                        "ehca_cq=%p cq_num=%x h_ret=%lx "
+                                        "counter=%i act_pages=%i",
+                                        my_cq, my_cq->cq_number,
+                                        h_ret, counter, param.act_pages);
+                               cq = ERR_PTR(-ENOMEM);
+                               goto create_cq_exit4;
+                       }
+               }
+       }
+
+       ipz_qeit_reset(&my_cq->ipz_queue);
+
+       gal = my_cq->galpas.kernel;
+       cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
+       ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%lx",
+                my_cq, my_cq->cq_number, cqx_fec);
+
+       my_cq->ib_cq.cqe = my_cq->nr_of_entries =
+               param.act_nr_of_entries - additional_cqe;
+       my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff;
+
+       for (i = 0; i < QP_HASHTAB_LEN; i++)
+               INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);
+
+       if (context) {
+               struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
+               struct ehca_create_cq_resp resp;
+               struct vm_area_struct *vma;
+               memset(&resp, 0, sizeof(resp));
+               resp.cq_number = my_cq->cq_number;
+               resp.token = my_cq->token;
+               resp.ipz_queue.qe_size = ipz_queue->qe_size;
+               resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg;
+               resp.ipz_queue.queue_length = ipz_queue->queue_length;
+               resp.ipz_queue.pagesize = ipz_queue->pagesize;
+               resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
+               ret = ehca_mmap_nopage(((u64)(my_cq->token) << 32) | 0x12000000,
+                                      ipz_queue->queue_length,
+                                      (void**)&resp.ipz_queue.queue,
+                                      &vma);
+               if (ret) {
+                       ehca_err(device, "Could not mmap queue pages");
+                       cq = ERR_PTR(ret);
+                       goto create_cq_exit4;
+               }
+               my_cq->uspace_queue = resp.ipz_queue.queue;
+               resp.galpas = my_cq->galpas;
+               ret = ehca_mmap_register(my_cq->galpas.user.fw_handle,
+                                        (void**)&resp.galpas.kernel.fw_handle,
+                                        &vma);
+               if (ret) {
+                       ehca_err(device, "Could not mmap fw_handle");
+                       cq = ERR_PTR(ret);
+                       goto create_cq_exit5;
+               }
+               my_cq->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
+               if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
+                       ehca_err(device, "Copy to udata failed.");
+                       cq = ERR_PTR(-EFAULT); /* don't return the freed cq */
+                       goto create_cq_exit6;
+               }
+       }
+
+       return cq;
+
+create_cq_exit6:
+       ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
+
+create_cq_exit5:
+       ehca_munmap(my_cq->uspace_queue, my_cq->ipz_queue.queue_length);
+
+create_cq_exit4:
+       ipz_queue_dtor(&my_cq->ipz_queue);
+
+create_cq_exit3:
+       h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
+       if (h_ret != H_SUCCESS)
+               ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
+                        "cq_num=%x h_ret=%lx", my_cq, my_cq->cq_number, h_ret);
+
+create_cq_exit2:
+       spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+       idr_remove(&ehca_cq_idr, my_cq->token);
+       spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+create_cq_exit1:
+       kmem_cache_free(cq_cache, my_cq);
+
+       return cq;
+}
+
+int ehca_destroy_cq(struct ib_cq *cq)
+{
+       u64 h_ret;
+       int ret;
+       struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
+       int cq_num = my_cq->cq_number;
+       struct ib_device *device = cq->device;
+       struct ehca_shca *shca = container_of(device, struct ehca_shca,
+                                             ib_device);
+       struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
+       u32 cur_pid = current->tgid;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+       while (my_cq->nr_callbacks)
+               yield();
+
+       idr_remove(&ehca_cq_idr, my_cq->token);
+       spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+       if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
+               ehca_err(device, "Invalid caller pid=%x ownpid=%x",
+                        cur_pid, my_cq->ownpid);
+               return -EINVAL;
+       }
+
+       /* unmap if a vma was allocated */
+       if (my_cq->uspace_queue) {
+               ret = ehca_munmap(my_cq->uspace_queue,
+                                 my_cq->ipz_queue.queue_length);
+               if (ret)
+                       ehca_err(device, "Could not munmap queue ehca_cq=%p "
+                                "cq_num=%x", my_cq, cq_num);
+               ret = ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
+               if (ret)
+                       ehca_err(device, "Could not munmap fwh ehca_cq=%p "
+                                "cq_num=%x", my_cq, cq_num);
+       }
+
+       h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
+       if (h_ret == H_R_STATE) {
+               /* cq in err: read err data and destroy it forcibly */
+               ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%lx in err "
+                        "state. Try to delete it forcibly.",
+                        my_cq, cq_num, my_cq->ipz_cq_handle.handle);
+               ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
+               h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
+               if (h_ret == H_SUCCESS)
+                       ehca_dbg(device, "cq_num=%x deleted successfully.",
+                                cq_num);
+       }
+       if (h_ret != H_SUCCESS) {
+               ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lx "
+                        "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
+               return ehca2ib_return_code(h_ret);
+       }
+       ipz_queue_dtor(&my_cq->ipz_queue);
+       kmem_cache_free(cq_cache, my_cq);
+
+       return 0;
+}
+
+int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
+{
+       struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
+       u32 cur_pid = current->tgid;
+
+       if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
+               ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x",
+                        cur_pid, my_cq->ownpid);
+               return -EINVAL;
+       }
+
+       /* TODO: proper resize needs to be done */
+       ehca_err(cq->device, "not implemented yet");
+
+       return -EFAULT;
+}
+
+int ehca_init_cq_cache(void)
+{
+       cq_cache = kmem_cache_create("ehca_cache_cq",
+                                    sizeof(struct ehca_cq), 0,
+                                    SLAB_HWCACHE_ALIGN,
+                                    NULL, NULL);
+       if (!cq_cache)
+               return -ENOMEM;
+       return 0;
+}
+
+void ehca_cleanup_cq_cache(void)
+{
+       if (cq_cache)
+               kmem_cache_destroy(cq_cache);
+}
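
The per-CQ QP table used by ehca_cq_assign_qp()/ehca_cq_get_qp() above relies
on QP_HASHTAB_LEN being a power of two, so masking with (QP_HASHTAB_LEN - 1)
picks a bucket like a cheap modulo.  A self-contained sketch of the same
lookup scheme, with a plain singly linked list standing in for the kernel's
hlist (and without the spinlock the driver takes around updates):

#include <stdio.h>

#define QP_HASHTAB_LEN 8	/* must be power of 2, as in ehca_classes.h */

struct qp {
	unsigned int real_qp_num;
	struct qp *next;
};

static struct qp *hashtab[QP_HASHTAB_LEN];

static void assign_qp(struct qp *qp)
{
	unsigned int key = qp->real_qp_num & (QP_HASHTAB_LEN - 1);

	qp->next = hashtab[key];	/* head insert, like hlist_add_head() */
	hashtab[key] = qp;
}

static struct qp *get_qp(unsigned int real_qp_num)
{
	unsigned int key = real_qp_num & (QP_HASHTAB_LEN - 1);
	struct qp *qp;

	for (qp = hashtab[key]; qp; qp = qp->next)
		if (qp->real_qp_num == real_qp_num)
			return qp;
	return NULL;
}

int main(void)
{
	struct qp a = { .real_qp_num = 17 };	/* bucket 17 & 7 == 1 */

	assign_qp(&a);
	printf("found=%d\n", get_qp(17) != NULL);	/* prints found=1 */
	return 0;
}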
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c
new file mode 100644
index 0000000..5281dec
--- /dev/null
@@ -0,0 +1,185 @@
+/*
+ *  IBM eServer eHCA InfiniBand device driver for Linux on POWER
+ *
+ *  Event queue handling
+ *
+ *  Authors: Waleri Fomin <fomin@de.ibm.com>
+ *           Khadija Souissi <souissi@de.ibm.com>
+ *           Reinhard Ernst <rernst@de.ibm.com>
+ *           Heiko J Schick <schickhj@de.ibm.com>
+ *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ehca_classes.h"
+#include "ehca_irq.h"
+#include "ehca_iverbs.h"
+#include "ehca_qes.h"
+#include "hcp_if.h"
+#include "ipz_pt_fn.h"
+
+int ehca_create_eq(struct ehca_shca *shca,
+                  struct ehca_eq *eq,
+                  const enum ehca_eq_type type, const u32 length)
+{
+       u64 ret;
+       u32 nr_pages;
+       u32 i;
+       void *vpage;
+       struct ib_device *ib_dev = &shca->ib_device;
+
+       spin_lock_init(&eq->spinlock);
+       eq->is_initialized = 0;
+
+       if (type != EHCA_EQ && type != EHCA_NEQ) {
+               ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq);
+               return -EINVAL;
+       }
+       if (!length) {
+               ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq);
+               return -EINVAL;
+       }
+
+       ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle,
+                                      &eq->pf,
+                                      type,
+                                      length,
+                                      &eq->ipz_eq_handle,
+                                      &eq->length,
+                                      &nr_pages, &eq->ist);
+
+       if (ret != H_SUCCESS) {
+               ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
+               return -EINVAL;
+       }
+
+       ret = ipz_queue_ctor(&eq->ipz_queue, nr_pages,
+                            EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0);
+       if (!ret) {
+               ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
+               goto create_eq_exit1;
+       }
+
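+       /*
+        * Register all queue pages with the hypervisor: every page but the
+        * last must come back as H_PAGE_REGISTERED, and the final one as
+        * H_SUCCESS with no further page left in the iterator.
+        */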
+       for (i = 0; i < nr_pages; i++) {
+               u64 rpage;
+
+               if (!(vpage = ipz_qpageit_get_inc(&eq->ipz_queue))) {
+                       ret = H_RESOURCE;
+                       goto create_eq_exit2;
+               }
+
+               rpage = virt_to_abs(vpage);
+               ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
+                                              eq->ipz_eq_handle,
+                                              &eq->pf,
+                                              0, 0, rpage, 1);
+
+               if (i == (nr_pages - 1)) {
+                       /* last page */
+                       vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
+                       if (ret != H_SUCCESS || vpage)
+                               goto create_eq_exit2;
+               } else {
+                       if (ret != H_PAGE_REGISTERED || !vpage)
+                               goto create_eq_exit2;
+               }
+       }
+
+       ipz_qeit_reset(&eq->ipz_queue);
+
+       /* register interrupt handlers and initialize work queues */
+       if (type == EHCA_EQ) {
+               /* ibmebus_request_irq() returns an int; don't test the
+                * unsigned u64 ret for < 0 */
+               int irq_ret = ibmebus_request_irq(NULL, eq->ist,
+                                                 ehca_interrupt_eq,
+                                                 SA_INTERRUPT, "ehca_eq",
+                                                 (void *)shca);
+               if (irq_ret < 0)
+                       ehca_err(ib_dev, "Can't map interrupt handler.");
+
+               tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
+       } else if (type == EHCA_NEQ) {
+               int irq_ret = ibmebus_request_irq(NULL, eq->ist,
+                                                 ehca_interrupt_neq,
+                                                 SA_INTERRUPT, "ehca_neq",
+                                                 (void *)shca);
+               if (irq_ret < 0)
+                       ehca_err(ib_dev, "Can't map interrupt handler.");
+
+               tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
+       }
+
+       eq->is_initialized = 1;
+
+       return 0;
+
+create_eq_exit2:
+       ipz_queue_dtor(&eq->ipz_queue);
+
+create_eq_exit1:
+       hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
+
+       return -EINVAL;
+}
+
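+/* dequeue one valid EQE; the EQ spinlock serializes concurrent pollers */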
+void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq)
+{
+       unsigned long flags;
+       void *eqe;
+
+       spin_lock_irqsave(&eq->spinlock, flags);
+       eqe = ipz_eqit_eq_get_inc_valid(&eq->ipz_queue);
+       spin_unlock_irqrestore(&eq->spinlock, flags);
+
+       return eqe;
+}
+
+int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
+{
+       unsigned long flags;
+       u64 h_ret;
+
+       spin_lock_irqsave(&eq->spinlock, flags);
+       ibmebus_free_irq(NULL, eq->ist, (void *)shca);
+
+       h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
+
+       spin_unlock_irqrestore(&eq->spinlock, flags);
+
+       if (h_ret != H_SUCCESS) {
+               ehca_err(&shca->ib_device, "Can't free EQ resources.");
+               return -EINVAL;
+       }
+       ipz_queue_dtor(&eq->ipz_queue);
+
+       return 0;
+}
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
new file mode 100644 (file)
index 0000000..5eae6ac
--- /dev/null
@@ -0,0 +1,241 @@
+/*
+ *  IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ *  HCA query functions
+ *
+ *  Authors: Heiko J Schick <schickhj@de.ibm.com>
+ *           Christoph Raisch <raisch@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ehca_tools.h"
+#include "hcp_if.h"
+
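+/*
+ * The query verbs below share one pattern: allocate an H_CB_ALIGNMENT-sized
+ * response block, issue the query hcall and copy the requested attributes
+ * out of the returned block.
+ */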
+int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
+{
+       int ret = 0;
+       struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
+                                             ib_device);
+       struct hipz_query_hca *rblock;
+
+       rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+       if (!rblock) {
+               ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
+               return -ENOMEM;
+       }
+
+       if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
+               ehca_err(&shca->ib_device, "Can't query device properties");
+               ret = -EINVAL;
+               goto query_device1;
+       }
+
+       memset(props, 0, sizeof(struct ib_device_attr));
+       props->fw_ver          = rblock->hw_ver;
+       props->max_mr_size     = rblock->max_mr_size;
+       props->vendor_id       = rblock->vendor_id >> 8;
+       props->vendor_part_id  = rblock->vendor_part_id >> 16;
+       props->hw_ver          = rblock->hw_ver;
+       props->max_qp          = min_t(int, rblock->max_qp, INT_MAX);
+       props->max_qp_wr       = min_t(int, rblock->max_wqes_wq, INT_MAX);
+       props->max_sge         = min_t(int, rblock->max_sge, INT_MAX);
+       props->max_sge_rd      = min_t(int, rblock->max_sge_rd, INT_MAX);
+       props->max_cq          = min_t(int, rblock->max_cq, INT_MAX);
+       props->max_cqe         = min_t(int, rblock->max_cqe, INT_MAX);
+       props->max_mr          = min_t(int, rblock->max_mr, INT_MAX);
+       props->max_mw          = min_t(int, rblock->max_mw, INT_MAX);
+       props->max_pd          = min_t(int, rblock->max_pd, INT_MAX);
+       props->max_ah          = min_t(int, rblock->max_ah, INT_MAX);
+       props->max_fmr         = min_t(int, rblock->max_mr, INT_MAX);
+       props->max_srq         = 0;
+       props->max_srq_wr      = 0;
+       props->max_srq_sge     = 0;
+       props->max_pkeys       = 16;
+       props->local_ca_ack_delay
+               = rblock->local_ca_ack_delay;
+       props->max_raw_ipv6_qp
+               = min_t(int, rblock->max_raw_ipv6_qp, INT_MAX);
+       props->max_raw_ethy_qp
+               = min_t(int, rblock->max_raw_ethy_qp, INT_MAX);
+       props->max_mcast_grp
+               = min_t(int, rblock->max_mcast_grp, INT_MAX);
+       props->max_mcast_qp_attach
+               = min_t(int, rblock->max_mcast_qp_attach, INT_MAX);
+       props->max_total_mcast_qp_attach
+               = min_t(int, rblock->max_total_mcast_qp_attach, INT_MAX);
+
+query_device1:
+       kfree(rblock);
+
+       return ret;
+}
+
+int ehca_query_port(struct ib_device *ibdev,
+                   u8 port, struct ib_port_attr *props)
+{
+       int ret = 0;
+       struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
+                                             ib_device);
+       struct hipz_query_port *rblock;
+
+       rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+       if (!rblock) {
+               ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
+               return -ENOMEM;
+       }
+
+       if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+               ehca_err(&shca->ib_device, "Can't query port properties");
+               ret = -EINVAL;
+               goto query_port1;
+       }
+
+       memset(props, 0, sizeof(struct ib_port_attr));
+       props->state = rblock->state;
+
+       switch (rblock->max_mtu) {
+       case 0x1:
+               props->active_mtu = props->max_mtu = IB_MTU_256;
+               break;
+       case 0x2:
+               props->active_mtu = props->max_mtu = IB_MTU_512;
+               break;
+       case 0x3:
+               props->active_mtu = props->max_mtu = IB_MTU_1024;
+               break;
+       case 0x4:
+               props->active_mtu = props->max_mtu = IB_MTU_2048;
+               break;
+       case 0x5:
+               props->active_mtu = props->max_mtu = IB_MTU_4096;
+               break;
+       default:
+               ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
+                        rblock->max_mtu);
+               break;
+       }
+
+       props->gid_tbl_len     = rblock->gid_tbl_len;
+       props->max_msg_sz      = rblock->max_msg_sz;
+       props->bad_pkey_cntr   = rblock->bad_pkey_cntr;
+       props->qkey_viol_cntr  = rblock->qkey_viol_cntr;
+       props->pkey_tbl_len    = rblock->pkey_tbl_len;
+       props->lid             = rblock->lid;
+       props->sm_lid          = rblock->sm_lid;
+       props->lmc             = rblock->lmc;
+       props->sm_sl           = rblock->sm_sl;
+       props->subnet_timeout  = rblock->subnet_timeout;
+       props->init_type_reply = rblock->init_type_reply;
+
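+       /* hardcoded defaults, no link width/speed sensing yet */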
+       props->active_width    = IB_WIDTH_12X;
+       props->active_speed    = 0x1;
+
+query_port1:
+       kfree(rblock);
+
+       return ret;
+}
+
+int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+{
+       int ret = 0;
+       struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
+       struct hipz_query_port *rblock;
+
+       if (index >= 16) {
+               ehca_err(&shca->ib_device, "Invalid index: %x.", index);
+               return -EINVAL;
+       }
+
+       rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+       if (!rblock) {
+               ehca_err(&shca->ib_device,  "Can't allocate rblock memory.");
+               return -ENOMEM;
+       }
+
+       if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+               ehca_err(&shca->ib_device, "Can't query port properties");
+               ret = -EINVAL;
+               goto query_pkey1;
+       }
+
+       memcpy(pkey, &rblock->pkey_entries[index], sizeof(u16));
+
+query_pkey1:
+       kfree(rblock);
+
+       return ret;
+}
+
+int ehca_query_gid(struct ib_device *ibdev, u8 port,
+                  int index, union ib_gid *gid)
+{
+       int ret = 0;
+       struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
+                                             ib_device);
+       struct hipz_query_port *rblock;
+
+       if (index > 255) {
+               ehca_err(&shca->ib_device, "Invalid index: %x.", index);
+               return -EINVAL;
+       }
+
+       rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+       if (!rblock) {
+               ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
+               return -ENOMEM;
+       }
+
+       if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+               ehca_err(&shca->ib_device, "Can't query port properties");
+               ret = -EINVAL;
+               goto query_gid1;
+       }
+
+       memcpy(&gid->raw[0], &rblock->gid_prefix, sizeof(u64));
+       memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64));
+
+query_gid1:
+       kfree(rblock);
+
+       return ret;
+}
+
+int ehca_modify_port(struct ib_device *ibdev,
+                    u8 port, int port_modify_mask,
+                    struct ib_port_modify *props)
+{
+       /* Not implemented yet */
+       return -EFAULT;
+}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
new file mode 100644 (file)
index 0000000..2a65b5b
--- /dev/null
@@ -0,0 +1,762 @@
+/*
+ *  IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ *  Functions for EQs, NEQs and interrupts
+ *
+ *  Authors: Heiko J Schick <schickhj@de.ibm.com>
+ *           Khadija Souissi <souissi@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ehca_classes.h"
+#include "ehca_irq.h"
+#include "ehca_iverbs.h"
+#include "ehca_tools.h"
+#include "hcp_if.h"
+#include "hipz_fns.h"
+
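+/* EQE/NEQE bit fields, given in IBM bit numbering (bit 0 = MSB) */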
+#define EQE_COMPLETION_EVENT   EHCA_BMASK_IBM(1,1)
+#define EQE_CQ_QP_NUMBER       EHCA_BMASK_IBM(8,31)
+#define EQE_EE_IDENTIFIER      EHCA_BMASK_IBM(2,7)
+#define EQE_CQ_NUMBER          EHCA_BMASK_IBM(8,31)
+#define EQE_QP_NUMBER          EHCA_BMASK_IBM(8,31)
+#define EQE_QP_TOKEN           EHCA_BMASK_IBM(32,63)
+#define EQE_CQ_TOKEN           EHCA_BMASK_IBM(32,63)
+
+#define NEQE_COMPLETION_EVENT  EHCA_BMASK_IBM(1,1)
+#define NEQE_EVENT_CODE        EHCA_BMASK_IBM(2,7)
+#define NEQE_PORT_NUMBER       EHCA_BMASK_IBM(8,15)
+#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16,16)
+
+#define ERROR_DATA_LENGTH      EHCA_BMASK_IBM(52,63)
+#define ERROR_DATA_TYPE        EHCA_BMASK_IBM(0,7)
+
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
+
+static void queue_comp_task(struct ehca_cq *__cq);
+
+static struct ehca_comp_pool* pool;
+static struct notifier_block comp_pool_callback_nb;
+
+#endif
+
+static inline void comp_event_callback(struct ehca_cq *cq)
+{
+       if (!cq->ib_cq.comp_handler)
+               return;
+
+       spin_lock(&cq->cb_lock);
+       cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
+       spin_unlock(&cq->cb_lock);
+
+       return;
+}
+
+static void print_error_data(struct ehca_shca * shca, void* data,
+                            u64* rblock, int length)
+{
+       u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
+       u64 resource = rblock[1];
+
+       switch (type) {
+       case 0x1: /* Queue Pair */
+       {
+               struct ehca_qp *qp = (struct ehca_qp*)data;
+
+               /* only print error data if AER is set */
+               if (rblock[6] == 0)
+                       return;
+
+               ehca_err(&shca->ib_device,
+                        "QP 0x%x (resource=%lx) has errors.",
+                        qp->ib_qp.qp_num, resource);
+               break;
+       }
+       case 0x4: /* Completion Queue */
+       {
+               struct ehca_cq *cq = (struct ehca_cq*)data;
+
+               ehca_err(&shca->ib_device,
+                        "CQ 0x%x (resource=%lx) has errors.",
+                        cq->cq_number, resource);
+               break;
+       }
+       default:
+               ehca_err(&shca->ib_device,
+                        "Unknown errror type: %lx on %s.",
+                        type, shca->ib_device.name);
+               break;
+       }
+
+       ehca_err(&shca->ib_device, "Error data is available: %lx.", resource);
+       ehca_err(&shca->ib_device, "EHCA ----- error data begin "
+                "---------------------------------------------------");
+       ehca_dmp(rblock, length, "resource=%lx", resource);
+       ehca_err(&shca->ib_device, "EHCA ----- error data end "
+                "----------------------------------------------------");
+
+       return;
+}
+
+int ehca_error_data(struct ehca_shca *shca, void *data,
+                   u64 resource)
+{
+       unsigned long ret;
+       u64 *rblock;
+       unsigned long block_count;
+
+       /* may run in tasklet context, so no GFP_KERNEL here */
+       rblock = kzalloc(H_CB_ALIGNMENT, GFP_ATOMIC);
+       if (!rblock) {
+               ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
+               ret = -ENOMEM;
+               goto error_data1;
+       }
+
+       ret = hipz_h_error_data(shca->ipz_hca_handle,
+                               resource,
+                               rblock,
+                               &block_count);
+
+       if (ret == H_R_STATE) {
+               ehca_err(&shca->ib_device,
+                        "No error data is available: %lx.", resource);
+       } else if (ret == H_SUCCESS) {
+               int length;
+
+               length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);
+
+               if (length > PAGE_SIZE)
+                       length = PAGE_SIZE;
+
+               print_error_data(shca, data, rblock, length);
+       } else {
+               ehca_err(&shca->ib_device,
+                        "Error data could not be fetched: %lx", resource);
+       }
+
+       kfree(rblock);
+
+error_data1:
+       return ret;
+}
+
+static void qp_event_callback(struct ehca_shca *shca,
+                             u64 eqe,
+                             enum ib_event_type event_type)
+{
+       struct ib_event event;
+       struct ehca_qp *qp;
+       unsigned long flags;
+       u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
+
+       spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+       qp = idr_find(&ehca_qp_idr, token);
+       spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+
+       if (!qp)
+               return;
+
+       ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
+
+       if (!qp->ib_qp.event_handler)
+               return;
+
+       event.device     = &shca->ib_device;
+       event.event      = event_type;
+       event.element.qp = &qp->ib_qp;
+
+       qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
+
+       return;
+}
+
+static void cq_event_callback(struct ehca_shca *shca,
+                                         u64 eqe)
+{
+       struct ehca_cq *cq;
+       unsigned long flags;
+       u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);
+
+       spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+       cq = idr_find(&ehca_cq_idr, token);
+       spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+       if (!cq)
+               return;
+
+       ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);
+
+       return;
+}
+
+static void parse_identifier(struct ehca_shca *shca, u64 eqe)
+{
+       u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);
+
+       switch (identifier) {
+       case 0x02: /* path migrated */
+               qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG);
+               break;
+       case 0x03: /* communication established */
+               qp_event_callback(shca, eqe, IB_EVENT_COMM_EST);
+               break;
+       case 0x04: /* send queue drained */
+               qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED);
+               break;
+       case 0x05: /* QP error */
+       case 0x06: /* QP error */
+               qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL);
+               break;
+       case 0x07: /* CQ error */
+       case 0x08: /* CQ error */
+               cq_event_callback(shca, eqe);
+               break;
+       case 0x09: /* MRMWPTE error */
+               ehca_err(&shca->ib_device, "MRMWPTE error.");
+               break;
+       case 0x0A: /* port event */
+               ehca_err(&shca->ib_device, "Port event.");
+               break;
+       case 0x0B: /* MR access error */
+               ehca_err(&shca->ib_device, "MR access error.");
+               break;
+       case 0x0C: /* EQ error */
+               ehca_err(&shca->ib_device, "EQ error.");
+               break;
+       case 0x0D: /* P/Q_Key mismatch */
+               ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
+               break;
+       case 0x10: /* sampling complete */
+               ehca_err(&shca->ib_device, "Sampling complete.");
+               break;
+       case 0x11: /* unaffiliated access error */
+               ehca_err(&shca->ib_device, "Unaffiliated access error.");
+               break;
+       case 0x12: /* path migrating error */
+               ehca_err(&shca->ib_device, "Path migration error.");
+               break;
+       case 0x13: /* interface trace stopped */
+               ehca_err(&shca->ib_device, "Interface trace stopped.");
+               break;
+       case 0x14: /* first error capture info available */
+       default:
+               ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
+                        identifier, shca->ib_device.name);
+               break;
+       }
+
+       return;
+}
+
+static void parse_ec(struct ehca_shca *shca, u64 eqe)
+{
+       struct ib_event event;
+       u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
+       u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
+
+       switch (ec) {
+       case 0x30: /* port availability change */
+               if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
+                       ehca_info(&shca->ib_device,
+                                 "port %x is active.", port);
+                       event.device = &shca->ib_device;
+                       event.event = IB_EVENT_PORT_ACTIVE;
+                       event.element.port_num = port;
+                       shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
+                       ib_dispatch_event(&event);
+               } else {
+                       ehca_info(&shca->ib_device,
+                                 "port %x is inactive.", port);
+                       event.device = &shca->ib_device;
+                       event.event = IB_EVENT_PORT_ERR;
+                       event.element.port_num = port;
+                       shca->sport[port - 1].port_state = IB_PORT_DOWN;
+                       ib_dispatch_event(&event);
+               }
+               break;
+       case 0x31:
+               /* port configuration change
+                * disruptive change is caused by
+                * LID, PKEY or SM change
+                */
+               ehca_warn(&shca->ib_device,
+                         "disruptive port %x configuration change", port);
+
+               ehca_info(&shca->ib_device,
+                        "port %x is inactive.", port);
+               event.device = &shca->ib_device;
+               event.event = IB_EVENT_PORT_ERR;
+               event.element.port_num = port;
+               shca->sport[port - 1].port_state = IB_PORT_DOWN;
+               ib_dispatch_event(&event);
+
+               ehca_info(&shca->ib_device,
+                        "port %x is active.", port);
+               event.device = &shca->ib_device;
+               event.event = IB_EVENT_PORT_ACTIVE;
+               event.element.port_num = port;
+               shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
+               ib_dispatch_event(&event);
+               break;
+       case 0x32: /* adapter malfunction */
+               ehca_err(&shca->ib_device, "Adapter malfunction.");
+               break;
+       case 0x33:  /* trace stopped */
+               ehca_err(&shca->ib_device, "Traced stopped.");
+               break;
+       default:
+               ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
+                        ec, shca->ib_device.name);
+               break;
+       }
+
+       return;
+}
+
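+/*
+ * Clear the CQ's EQ-pending bit; the readback is presumably there to push
+ * the store out to the adapter before returning.
+ */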
+static inline void reset_eq_pending(struct ehca_cq *cq)
+{
+       u64 CQx_EP;
+       struct h_galpa gal = cq->galpas.kernel;
+
+       hipz_galpa_store_cq(gal, cqx_ep, 0x0);
+       CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));
+
+       return;
+}
+
+irqreturn_t ehca_interrupt_neq(int irq, void *dev_id, struct pt_regs *regs)
+{
+       struct ehca_shca *shca = (struct ehca_shca*)dev_id;
+
+       tasklet_hi_schedule(&shca->neq.interrupt_task);
+
+       return IRQ_HANDLED;
+}
+
+void ehca_tasklet_neq(unsigned long data)
+{
+       struct ehca_shca *shca = (struct ehca_shca*)data;
+       struct ehca_eqe *eqe;
+       u64 ret;
+
+       eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
+
+       while (eqe) {
+               if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
+                       parse_ec(shca, eqe->entry);
+
+               eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
+       }
+
+       ret = hipz_h_reset_event(shca->ipz_hca_handle,
+                                shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);
+
+       if (ret != H_SUCCESS)
+               ehca_err(&shca->ib_device, "Can't clear notification events.");
+
+       return;
+}
+
+irqreturn_t ehca_interrupt_eq(int irq, void *dev_id, struct pt_regs *regs)
+{
+       struct ehca_shca *shca = (struct ehca_shca*)dev_id;
+
+       tasklet_hi_schedule(&shca->eq.interrupt_task);
+
+       return IRQ_HANDLED;
+}
+
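+/*
+ * EQ tasklet: drain the event queue. On hw_level >= 2 the interrupt state
+ * is re-queried after the queue runs empty so events racing with the drain
+ * are not lost; query_cnt caps the retries to avoid livelocking here.
+ */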
+void ehca_tasklet_eq(unsigned long data)
+{
+       struct ehca_shca *shca = (struct ehca_shca*)data;
+       struct ehca_eqe *eqe;
+       int int_state;
+       int query_cnt = 0;
+
+       do {
+               eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
+
+               if ((shca->hw_level >= 2) && eqe)
+                       int_state = 1;
+               else
+                       int_state = 0;
+
+               while ((int_state == 1) || eqe) {
+                       while (eqe) {
+                               u64 eqe_value = eqe->entry;
+
+                               ehca_dbg(&shca->ib_device,
+                                        "eqe_value=%lx", eqe_value);
+
+                               /* TODO: better structure */
+                               if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT,
+                                                  eqe_value)) {
+                                       unsigned long flags;
+                                       u32 token;
+                                       struct ehca_cq *cq;
+
+                                       ehca_dbg(&shca->ib_device,
+                                                "... completion event");
+                                       token =
+                                               EHCA_BMASK_GET(EQE_CQ_TOKEN,
+                                                              eqe_value);
+                                       spin_lock_irqsave(&ehca_cq_idr_lock,
+                                                         flags);
+                                       cq = idr_find(&ehca_cq_idr, token);
+
+                                       if (!cq) {
+                                               spin_unlock_irqrestore(
+                                                       &ehca_cq_idr_lock,
+                                                       flags);
+                                               break;
+                                       }
+
+                                       reset_eq_pending(cq);
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
+                                       queue_comp_task(cq);
+                                       spin_unlock_irqrestore(&ehca_cq_idr_lock,
+                                                              flags);
+#else
+                                       spin_unlock_irqrestore(&ehca_cq_idr_lock,
+                                                              flags);
+                                       comp_event_callback(cq);
+#endif
+                               } else {
+                                       ehca_dbg(&shca->ib_device,
+                                                "... non completion event");
+                                       parse_identifier(shca, eqe_value);
+                               }
+                               eqe = (struct ehca_eqe *)
+                                       ehca_poll_eq(shca, &shca->eq);
+                       }
+
+                       if (shca->hw_level >= 2) {
+                               int_state =
+                                   hipz_h_query_int_state(shca->ipz_hca_handle,
+                                                          shca->eq.ist);
+                               query_cnt++;
+                               iosync();
+                               if (query_cnt >= 100) {
+                                       query_cnt = 0;
+                                       int_state = 0;
+                               }
+                       }
+                       eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
+
+               }
+       } while (int_state != 0);
+
+       return;
+}
+
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
+
+static inline int find_next_online_cpu(struct ehca_comp_pool* pool)
+{
+       unsigned long flags_last_cpu;
+
+       if (ehca_debug_level)
+               ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
+
+       spin_lock_irqsave(&pool->last_cpu_lock, flags_last_cpu);
+       pool->last_cpu = next_cpu(pool->last_cpu, cpu_online_map);
+       if (pool->last_cpu == NR_CPUS)
+               pool->last_cpu = first_cpu(cpu_online_map);
+       spin_unlock_irqrestore(&pool->last_cpu_lock, flags_last_cpu);
+
+       return pool->last_cpu;
+}
+
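+/*
+ * Queue a CQ on a per-CPU completion task. A CQ that is already listed
+ * only gets its callback count bumped; otherwise it is appended and the
+ * worker thread is woken.
+ */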
+static void __queue_comp_task(struct ehca_cq *__cq,
+                             struct ehca_cpu_comp_task *cct)
+{
+       unsigned long flags_cct;
+       unsigned long flags_cq;
+
+       spin_lock_irqsave(&cct->task_lock, flags_cct);
+       spin_lock_irqsave(&__cq->task_lock, flags_cq);
+
+       if (__cq->nr_callbacks == 0) {
+               __cq->nr_callbacks++;
+               list_add_tail(&__cq->entry, &cct->cq_list);
+               cct->cq_jobs++;
+               wake_up(&cct->wait_queue);
+       } else
+               __cq->nr_callbacks++;
+
+       spin_unlock_irqrestore(&__cq->task_lock, flags_cq);
+       spin_unlock_irqrestore(&cct->task_lock, flags_cct);
+}
+
+static void queue_comp_task(struct ehca_cq *__cq)
+{
+       int cpu;
+       int cpu_id;
+       struct ehca_cpu_comp_task *cct;
+
+       cpu = get_cpu();
+       cpu_id = find_next_online_cpu(pool);
+
+       BUG_ON(!cpu_online(cpu_id));
+
+       cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
+
+       if (cct->cq_jobs > 0) {
+               cpu_id = find_next_online_cpu(pool);
+               cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
+       }
+
+       __queue_comp_task(__cq, cct);
+
+       put_cpu();
+
+       return;
+}
+
+static void run_comp_task(struct ehca_cpu_comp_task* cct)
+{
+       struct ehca_cq *cq;
+       unsigned long flags_cct;
+       unsigned long flags_cq;
+
+       spin_lock_irqsave(&cct->task_lock, flags_cct);
+
+       while (!list_empty(&cct->cq_list)) {
+               cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
+               spin_unlock_irqrestore(&cct->task_lock, flags_cct);
+               comp_event_callback(cq);
+               spin_lock_irqsave(&cct->task_lock, flags_cct);
+
+               spin_lock_irqsave(&cq->task_lock, flags_cq);
+               cq->nr_callbacks--;
+               if (cq->nr_callbacks == 0) {
+                       list_del_init(cct->cq_list.next);
+                       cct->cq_jobs--;
+               }
+               spin_unlock_irqrestore(&cq->task_lock, flags_cq);
+
+       }
+
+       spin_unlock_irqrestore(&cct->task_lock, flags_cct);
+
+       return;
+}
+
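+/* per-CPU completion worker: classic wait-queue sleep/wake loop */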
+static int comp_task(void *__cct)
+{
+       struct ehca_cpu_comp_task* cct = __cct;
+       DECLARE_WAITQUEUE(wait, current);
+
+       set_current_state(TASK_INTERRUPTIBLE);
+       while (!kthread_should_stop()) {
+               add_wait_queue(&cct->wait_queue, &wait);
+
+               if (list_empty(&cct->cq_list))
+                       schedule();
+               else
+                       __set_current_state(TASK_RUNNING);
+
+               remove_wait_queue(&cct->wait_queue, &wait);
+
+               if (!list_empty(&cct->cq_list))
+                       run_comp_task(__cct);
+
+               set_current_state(TASK_INTERRUPTIBLE);
+       }
+       __set_current_state(TASK_RUNNING);
+
+       return 0;
+}
+
+static struct task_struct *create_comp_task(struct ehca_comp_pool *pool,
+                                           int cpu)
+{
+       struct ehca_cpu_comp_task *cct;
+
+       cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
+       spin_lock_init(&cct->task_lock);
+       INIT_LIST_HEAD(&cct->cq_list);
+       init_waitqueue_head(&cct->wait_queue);
+       cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu);
+
+       return cct->task;
+}
+
+static void destroy_comp_task(struct ehca_comp_pool *pool,
+                             int cpu)
+{
+       struct ehca_cpu_comp_task *cct;
+       struct task_struct *task;
+       unsigned long flags_cct;
+
+       cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
+
+       spin_lock_irqsave(&cct->task_lock, flags_cct);
+
+       task = cct->task;
+       cct->task = NULL;
+       cct->cq_jobs = 0;
+
+       spin_unlock_irqrestore(&cct->task_lock, flags_cct);
+
+       if (task)
+               kthread_stop(task);
+
+       return;
+}
+
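+/* move a dead CPU's pending CQs over to the current CPU's completion task */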
+static void take_over_work(struct ehca_comp_pool *pool,
+                          int cpu)
+{
+       struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
+       LIST_HEAD(list);
+       struct ehca_cq *cq;
+       unsigned long flags_cct;
+
+       spin_lock_irqsave(&cct->task_lock, flags_cct);
+
+       list_splice_init(&cct->cq_list, &list);
+
+       while (!list_empty(&list)) {
+               cq = list_entry(list.next, struct ehca_cq, entry);
+
+               list_del(&cq->entry);
+               __queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
+                                                 smp_processor_id()));
+       }
+
+       spin_unlock_irqrestore(&cct->task_lock, flags_cct);
+}
+
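+/* CPU hotplug notifier: create, bind and destroy per-CPU completion tasks */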
+static int comp_pool_callback(struct notifier_block *nfb,
+                             unsigned long action,
+                             void *hcpu)
+{
+       unsigned int cpu = (unsigned long)hcpu;
+       struct ehca_cpu_comp_task *cct;
+
+       switch (action) {
+       case CPU_UP_PREPARE:
+               ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
+               if(!create_comp_task(pool, cpu)) {
+                       ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
+                       return NOTIFY_BAD;
+               }
+               break;
+       case CPU_UP_CANCELED:
+               ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
+               cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
+               kthread_bind(cct->task, any_online_cpu(cpu_online_map));
+               destroy_comp_task(pool, cpu);
+               break;
+       case CPU_ONLINE:
+               ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
+               cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
+               kthread_bind(cct->task, cpu);
+               wake_up_process(cct->task);
+               break;
+       case CPU_DOWN_PREPARE:
+               ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
+               break;
+       case CPU_DOWN_FAILED:
+               ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
+               break;
+       case CPU_DEAD:
+               ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
+               destroy_comp_task(pool, cpu);
+               take_over_work(pool, cpu);
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+#endif
+
+int ehca_create_comp_pool(void)
+{
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
+       int cpu;
+       struct task_struct *task;
+
+       pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
+       if (pool == NULL)
+               return -ENOMEM;
+
+       spin_lock_init(&pool->last_cpu_lock);
+       pool->last_cpu = any_online_cpu(cpu_online_map);
+
+       pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
+       if (pool->cpu_comp_tasks == NULL) {
+               kfree(pool);
+               return -ENOMEM;
+       }
+
+       for_each_online_cpu(cpu) {
+               task = create_comp_task(pool, cpu);
+               if (task) {
+                       kthread_bind(task, cpu);
+                       wake_up_process(task);
+               }
+       }
+
+       comp_pool_callback_nb.notifier_call = comp_pool_callback;
+       comp_pool_callback_nb.priority = 0;
+       register_cpu_notifier(&comp_pool_callback_nb);
+#endif
+
+       return 0;
+}
+
+void ehca_destroy_comp_pool(void)
+{
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
+       int i;
+
+       unregister_cpu_notifier(&comp_pool_callback_nb);
+
+       for_each_online_cpu(i)
+               destroy_comp_task(pool, i);
+
+       free_percpu(pool->cpu_comp_tasks);
+       kfree(pool);
+#endif
+
+       return;
+}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.h b/drivers/infiniband/hw/ehca/ehca_irq.h
new file mode 100644 (file)
index 0000000..85bf1fe
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ *  IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ *  Function definitions and structs for EQs, NEQs and interrupts
+ *
+ *  Authors: Heiko J Schick <schickhj@de.ibm.com>
+ *           Khadija Souissi <souissi@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __EHCA_IRQ_H
+#define __EHCA_IRQ_H
+
+struct ehca_shca;
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <asm/atomic.h>
+
+int ehca_error_data(struct ehca_shca *shca, void *data, u64 resource);
+
+irqreturn_t ehca_interrupt_neq(int irq, void *dev_id, struct pt_regs *regs);
+void ehca_tasklet_neq(unsigned long data);
+
+irqreturn_t ehca_interrupt_eq(int irq, void *dev_id, struct pt_regs *regs);
+void ehca_tasklet_eq(unsigned long data);
+
+struct ehca_cpu_comp_task {
+       wait_queue_head_t wait_queue;
+       struct list_head cq_list;
+       struct task_struct *task;
+       spinlock_t task_lock;
+       int cq_jobs;
+};
+
+struct ehca_comp_pool {
+       struct ehca_cpu_comp_task *cpu_comp_tasks;
+       int last_cpu;
+       spinlock_t last_cpu_lock;
+};
+
+int ehca_create_comp_pool(void);
+void ehca_destroy_comp_pool(void);
+
+#endif
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
new file mode 100644 (file)
index 0000000..bbdc437
--- /dev/null
@@ -0,0 +1,181 @@
+/*
+ *  IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ *  Function definitions for internal functions
+ *
+ *  Authors: Heiko J Schick <schickhj@de.ibm.com>
+ *           Dietmar Decker <ddecker@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __EHCA_IVERBS_H__
+#define __EHCA_IVERBS_H__
+
+#include "ehca_classes.h"
+
+int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props);
+
+int ehca_query_port(struct ib_device *ibdev, u8 port,
+                   struct ib_port_attr *props);
+
+int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey);
+
+int ehca_query_gid(struct ib_device *ibdev, u8 port, int index,
+                  union ib_gid *gid);
+
+int ehca_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask,
+                    struct ib_port_modify *props);
+
+struct ib_pd *ehca_alloc_pd(struct ib_device *device,
+                           struct ib_ucontext *context,
+                           struct ib_udata *udata);
+
+int ehca_dealloc_pd(struct ib_pd *pd);
+
+struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+
+int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
+
+int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
+
+int ehca_destroy_ah(struct ib_ah *ah);
+
+struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
+
+struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
+                              struct ib_phys_buf *phys_buf_array,
+                              int num_phys_buf,
+                              int mr_access_flags, u64 *iova_start);
+
+struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd,
+                              struct ib_umem *region,
+                              int mr_access_flags, struct ib_udata *udata);
+
+int ehca_rereg_phys_mr(struct ib_mr *mr,
+                      int mr_rereg_mask,
+                      struct ib_pd *pd,
+                      struct ib_phys_buf *phys_buf_array,
+                      int num_phys_buf, int mr_access_flags, u64 *iova_start);
+
+int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
+
+int ehca_dereg_mr(struct ib_mr *mr);
+
+struct ib_mw *ehca_alloc_mw(struct ib_pd *pd);
+
+int ehca_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
+                struct ib_mw_bind *mw_bind);
+
+int ehca_dealloc_mw(struct ib_mw *mw);
+
+struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
+                             int mr_access_flags,
+                             struct ib_fmr_attr *fmr_attr);
+
+int ehca_map_phys_fmr(struct ib_fmr *fmr,
+                     u64 *page_list, int list_len, u64 iova);
+
+int ehca_unmap_fmr(struct list_head *fmr_list);
+
+int ehca_dealloc_fmr(struct ib_fmr *fmr);
+
+enum ehca_eq_type {
+       EHCA_EQ = 0, /* Event Queue              */
+       EHCA_NEQ     /* Notification Event Queue */
+};
+
+int ehca_create_eq(struct ehca_shca *shca, struct ehca_eq *eq,
+                  enum ehca_eq_type type, const u32 length);
+
+int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq);
+
+void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq);
+
+struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
+                            struct ib_ucontext *context,
+                            struct ib_udata *udata);
+
+int ehca_destroy_cq(struct ib_cq *cq);
+
+int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
+
+int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
+
+int ehca_peek_cq(struct ib_cq *cq, int wc_cnt);
+
+int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify);
+
+struct ib_qp *ehca_create_qp(struct ib_pd *pd,
+                            struct ib_qp_init_attr *init_attr,
+                            struct ib_udata *udata);
+
+int ehca_destroy_qp(struct ib_qp *qp);
+
+int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask);
+
+int ehca_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+                 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
+
+int ehca_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
+                  struct ib_send_wr **bad_send_wr);
+
+int ehca_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
+                  struct ib_recv_wr **bad_recv_wr);
+
+u64 ehca_define_sqp(struct ehca_shca *shca, struct ehca_qp *ibqp,
+                   struct ib_qp_init_attr *qp_init_attr);
+
+int ehca_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
+
+int ehca_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
+
+struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
+                                       struct ib_udata *udata);
+
+int ehca_dealloc_ucontext(struct ib_ucontext *context);
+
+int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
+void ehca_poll_eqs(unsigned long data);
+
+int ehca_mmap_nopage(u64 foffset, u64 length, void **mapped,
+                    struct vm_area_struct **vma);
+
+int ehca_mmap_register(u64 physical, void **mapped,
+                      struct vm_area_struct **vma);
+
+int ehca_munmap(unsigned long addr, size_t len);
+
+#endif
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
new file mode 100644 (file)
index 0000000..2a99f2d
--- /dev/null
@@ -0,0 +1,818 @@
+/*
+ *  IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ *  module start stop, hca detection
+ *
+ *  Authors: Heiko J Schick <schickhj@de.ibm.com>
+ *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *           Joachim Fenkes <fenkes@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ehca_classes.h"
+#include "ehca_iverbs.h"
+#include "ehca_mrmw.h"
+#include "ehca_tools.h"
+#include "hcp_if.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
+MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
+MODULE_VERSION("SVNEHCA_0016");
+
+int ehca_open_aqp1     = 0;
+int ehca_debug_level   = 0;
+int ehca_hw_level      = 0;
+int ehca_nr_ports      = 2;
+int ehca_use_hp_mr     = 0;
+int ehca_port_act_time = 30;
+int ehca_poll_all_eqs  = 1;
+int ehca_static_rate   = -1;
+
+module_param_named(open_aqp1,     ehca_open_aqp1,     int, 0);
+module_param_named(debug_level,   ehca_debug_level,   int, 0);
+module_param_named(hw_level,      ehca_hw_level,      int, 0);
+module_param_named(nr_ports,      ehca_nr_ports,      int, 0);
+module_param_named(use_hp_mr,     ehca_use_hp_mr,     int, 0);
+module_param_named(port_act_time, ehca_port_act_time, int, 0);
+module_param_named(poll_all_eqs,  ehca_poll_all_eqs,  int, 0);
+module_param_named(static_rate,   ehca_static_rate,   int, 0);
+
+MODULE_PARM_DESC(open_aqp1,
+                "AQP1 on startup (0: no (default), 1: yes)");
+MODULE_PARM_DESC(debug_level,
+                "debug level"
+                " (0: no debug traces (default), 1: with debug traces)");
+MODULE_PARM_DESC(hw_level,
+                "hardware level"
+                " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)");
+MODULE_PARM_DESC(nr_ports,
+                "number of connected ports (default: 2)");
+MODULE_PARM_DESC(use_hp_mr,
+                "high performance MRs (0: no (default), 1: yes)");
+MODULE_PARM_DESC(port_act_time,
+                "time to wait for port activation (default: 30 sec)");
+MODULE_PARM_DESC(poll_all_eqs,
+                "polls all event queues periodically"
+                " (0: no, 1: yes (default))");
+MODULE_PARM_DESC(static_rate,
+                "set permanent static rate (default: disabled)");
+
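+/* idrs map CQ/QP tokens found in event queue entries back to the objects */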
+spinlock_t ehca_qp_idr_lock;
+spinlock_t ehca_cq_idr_lock;
+DEFINE_IDR(ehca_qp_idr);
+DEFINE_IDR(ehca_cq_idr);
+
+static struct list_head shca_list; /* list of all registered ehcas */
+static spinlock_t shca_list_lock;
+
+static struct timer_list poll_eqs_timer;
+
+static int ehca_create_slab_caches(void)
+{
+       int ret;
+
+       ret = ehca_init_pd_cache();
+       if (ret) {
+               ehca_gen_err("Cannot create PD SLAB cache.");
+               return ret;
+       }
+
+       ret = ehca_init_cq_cache();
+       if (ret) {
+               ehca_gen_err("Cannot create CQ SLAB cache.");
+               goto create_slab_caches2;
+       }
+
+       ret = ehca_init_qp_cache();
+       if (ret) {
+               ehca_gen_err("Cannot create QP SLAB cache.");
+               goto create_slab_caches3;
+       }
+
+       ret = ehca_init_av_cache();
+       if (ret) {
+               ehca_gen_err("Cannot create AV SLAB cache.");
+               goto create_slab_caches4;
+       }
+
+       ret = ehca_init_mrmw_cache();
+       if (ret) {
+               ehca_gen_err("Cannot create MR&MW SLAB cache.");
+               goto create_slab_caches5;
+       }
+
+       return 0;
+
+create_slab_caches5:
+       ehca_cleanup_av_cache();
+
+create_slab_caches4:
+       ehca_cleanup_qp_cache();
+
+create_slab_caches3:
+       ehca_cleanup_cq_cache();
+
+create_slab_caches2:
+       ehca_cleanup_pd_cache();
+
+       return ret;
+}
+
+static void ehca_destroy_slab_caches(void)
+{
+       ehca_cleanup_mrmw_cache();
+       ehca_cleanup_av_cache();
+       ehca_cleanup_qp_cache();
+       ehca_cleanup_cq_cache();
+       ehca_cleanup_pd_cache();
+}
+
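+/* hardware version fields within the hw_ver word, IBM bit numbering */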
+#define EHCA_HCAAVER  EHCA_BMASK_IBM(32,39)
+#define EHCA_REVID    EHCA_BMASK_IBM(40,63)
+
+int ehca_sense_attributes(struct ehca_shca *shca)
+{
+       int ret = 0;
+       u64 h_ret;
+       struct hipz_query_hca *rblock;
+
+       rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+       if (!rblock) {
+               ehca_gen_err("Cannot allocate rblock memory.");
+               return -ENOMEM;
+       }
+
+       h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
+       if (h_ret != H_SUCCESS) {
+               ehca_gen_err("Cannot query device properties. h_ret=%lx",
+                            h_ret);
+               ret = -EPERM;
+               goto num_ports1;
+       }
+
+       if (ehca_nr_ports == 1)
+               shca->num_ports = 1;
+       else
+               shca->num_ports = (u8)rblock->num_ports;
+
+       ehca_gen_dbg(" ... found %x ports", rblock->num_ports);
+
+       if (ehca_hw_level == 0) {
+               u32 hcaaver;
+               u32 revid;
+
+               hcaaver = EHCA_BMASK_GET(EHCA_HCAAVER, rblock->hw_ver);
+               revid   = EHCA_BMASK_GET(EHCA_REVID, rblock->hw_ver);
+
+               ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid);
+
+               if ((hcaaver == 1) && (revid == 0))
+                       shca->hw_level = 0;
+               else if ((hcaaver == 1) && (revid == 1))
+                       shca->hw_level = 1;
+               else if ((hcaaver == 1) && (revid == 2))
+                       shca->hw_level = 2;
+       }
+       ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);
+
+       shca->sport[0].rate = IB_RATE_30_GBPS;
+       shca->sport[1].rate = IB_RATE_30_GBPS;
+
+num_ports1:
+       kfree(rblock);
+       return ret;
+}
+
+static int init_node_guid(struct ehca_shca *shca)
+{
+       int ret = 0;
+       struct hipz_query_hca *rblock;
+
+       rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+       if (!rblock) {
+               ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
+               return -ENOMEM;
+       }
+
+       if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
+               ehca_err(&shca->ib_device, "Can't query device properties");
+               ret = -EINVAL;
+               goto init_node_guid1;
+       }
+
+       memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64));
+
+init_node_guid1:
+       kfree(rblock);
+       return ret;
+}
+
+int ehca_register_device(struct ehca_shca *shca)
+{
+       int ret;
+
+       ret = init_node_guid(shca);
+       if (ret)
+               return ret;
+
+       strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
+       shca->ib_device.owner               = THIS_MODULE;
+
+       shca->ib_device.uverbs_abi_ver      = 5;
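+	/* bitmask of userspace verbs commands this driver implements */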
+       shca->ib_device.uverbs_cmd_mask     =
+               (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
+               (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
+               (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
+               (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
+               (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
+               (1ull << IB_USER_VERBS_CMD_REG_MR)              |
+               (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
+               (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+               (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
+               (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
+               (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
+               (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
+               (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
+               (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
+               (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
+               (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
+
+       shca->ib_device.node_type           = IB_NODE_CA;
+       shca->ib_device.phys_port_cnt       = shca->num_ports;
+       shca->ib_device.dma_device          = &shca->ibmebus_dev->ofdev.dev;
+       shca->ib_device.query_device        = ehca_query_device;
+       shca->ib_device.query_port          = ehca_query_port;
+       shca->ib_device.query_gid           = ehca_query_gid;
+       shca->ib_device.query_pkey          = ehca_query_pkey;
+	/* shca->ib_device.modify_device    = ehca_modify_device    */
+       shca->ib_device.modify_port         = ehca_modify_port;
+       shca->ib_device.alloc_ucontext      = ehca_alloc_ucontext;
+       shca->ib_device.dealloc_ucontext    = ehca_dealloc_ucontext;
+       shca->ib_device.alloc_pd            = ehca_alloc_pd;
+       shca->ib_device.dealloc_pd          = ehca_dealloc_pd;
+       shca->ib_device.create_ah           = ehca_create_ah;
+       /* shca->ib_device.modify_ah        = ehca_modify_ah;       */
+       shca->ib_device.query_ah            = ehca_query_ah;
+       shca->ib_device.destroy_ah          = ehca_destroy_ah;
+       shca->ib_device.create_qp           = ehca_create_qp;
+       shca->ib_device.modify_qp           = ehca_modify_qp;
+       shca->ib_device.query_qp            = ehca_query_qp;
+       shca->ib_device.destroy_qp          = ehca_destroy_qp;
+       shca->ib_device.post_send           = ehca_post_send;
+       shca->ib_device.post_recv           = ehca_post_recv;
+       shca->ib_device.create_cq           = ehca_create_cq;
+       shca->ib_device.destroy_cq          = ehca_destroy_cq;
+       shca->ib_device.resize_cq           = ehca_resize_cq;
+       shca->ib_device.poll_cq             = ehca_poll_cq;
+       /* shca->ib_device.peek_cq          = ehca_peek_cq;         */
+       shca->ib_device.req_notify_cq       = ehca_req_notify_cq;
+       /* shca->ib_device.req_ncomp_notif  = ehca_req_ncomp_notif; */
+       shca->ib_device.get_dma_mr          = ehca_get_dma_mr;
+       shca->ib_device.reg_phys_mr         = ehca_reg_phys_mr;
+       shca->ib_device.reg_user_mr         = ehca_reg_user_mr;
+       shca->ib_device.query_mr            = ehca_query_mr;
+       shca->ib_device.dereg_mr            = ehca_dereg_mr;
+       shca->ib_device.rereg_phys_mr       = ehca_rereg_phys_mr;
+       shca->ib_device.alloc_mw            = ehca_alloc_mw;
+       shca->ib_device.bind_mw             = ehca_bind_mw;
+       shca->ib_device.dealloc_mw          = ehca_dealloc_mw;
+       shca->ib_device.alloc_fmr           = ehca_alloc_fmr;
+       shca->ib_device.map_phys_fmr        = ehca_map_phys_fmr;
+       shca->ib_device.unmap_fmr           = ehca_unmap_fmr;
+       shca->ib_device.dealloc_fmr         = ehca_dealloc_fmr;
+       shca->ib_device.attach_mcast        = ehca_attach_mcast;
+       shca->ib_device.detach_mcast        = ehca_detach_mcast;
+       /* shca->ib_device.process_mad      = ehca_process_mad;     */
+       shca->ib_device.mmap                = ehca_mmap;
+
+       ret = ib_register_device(&shca->ib_device);
+       if (ret)
+               ehca_err(&shca->ib_device,
+                        "ib_register_device() failed ret=%x", ret);
+
+       return ret;
+}
+
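+/*
+ * AQP1 is the GSI QP (QP1) of a port, used for management datagrams
+ * (MADs).  It is created with its own CQ when the ehca_open_aqp1
+ * module parameter is set.
+ */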
+static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
+{
+       struct ehca_sport *sport = &shca->sport[port - 1];
+       struct ib_cq *ibcq;
+       struct ib_qp *ibqp;
+       struct ib_qp_init_attr qp_init_attr;
+       int ret;
+
+       if (sport->ibcq_aqp1) {
+               ehca_err(&shca->ib_device, "AQP1 CQ is already created.");
+               return -EPERM;
+       }
+
+	ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1), 10);
+       if (IS_ERR(ibcq)) {
+               ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
+               return PTR_ERR(ibcq);
+       }
+       sport->ibcq_aqp1 = ibcq;
+
+       if (sport->ibqp_aqp1) {
+               ehca_err(&shca->ib_device, "AQP1 QP is already created.");
+               ret = -EPERM;
+               goto create_aqp1;
+       }
+
+       memset(&qp_init_attr, 0, sizeof(struct ib_qp_init_attr));
+       qp_init_attr.send_cq          = ibcq;
+       qp_init_attr.recv_cq          = ibcq;
+       qp_init_attr.sq_sig_type      = IB_SIGNAL_ALL_WR;
+       qp_init_attr.cap.max_send_wr  = 100;
+       qp_init_attr.cap.max_recv_wr  = 100;
+       qp_init_attr.cap.max_send_sge = 2;
+       qp_init_attr.cap.max_recv_sge = 1;
+       qp_init_attr.qp_type          = IB_QPT_GSI;
+       qp_init_attr.port_num         = port;
+       qp_init_attr.qp_context       = NULL;
+       qp_init_attr.event_handler    = NULL;
+       qp_init_attr.srq              = NULL;
+
+       ibqp = ib_create_qp(&shca->pd->ib_pd, &qp_init_attr);
+       if (IS_ERR(ibqp)) {
+               ehca_err(&shca->ib_device, "Cannot create AQP1 QP.");
+               ret = PTR_ERR(ibqp);
+               goto create_aqp1;
+       }
+       sport->ibqp_aqp1 = ibqp;
+
+       return 0;
+
+create_aqp1:
+       ib_destroy_cq(sport->ibcq_aqp1);
+       return ret;
+}
+
+static int ehca_destroy_aqp1(struct ehca_sport *sport)
+{
+       int ret;
+
+       ret = ib_destroy_qp(sport->ibqp_aqp1);
+       if (ret) {
+               ehca_gen_err("Cannot destroy AQP1 QP. ret=%x", ret);
+               return ret;
+       }
+
+       ret = ib_destroy_cq(sport->ibcq_aqp1);
+       if (ret)
+               ehca_gen_err("Cannot destroy AQP1 CQ. ret=%x", ret);
+
+       return ret;
+}
+
+static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", ehca_debug_level);
+}
+
+static ssize_t ehca_store_debug_level(struct device_driver *ddp,
+                                     const char *buf, size_t count)
+{
+	int value = (*buf) - '0';
+
+	if (value >= 0 && value <= 9)
+		ehca_debug_level = value;
+
+	/* report the whole buffer as consumed so callers do not retry */
+	return count;
+}
+
+DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
+           ehca_show_debug_level, ehca_store_debug_level);
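+/*
+ * Example (exact path depends on where ibmebus places the driver):
+ *   echo 5 > /sys/bus/ibmebus/drivers/ehca/debug_level
+ */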
+
+void ehca_create_driver_sysfs(struct ibmebus_driver *drv)
+{
+       driver_create_file(&drv->driver, &driver_attr_debug_level);
+}
+
+void ehca_remove_driver_sysfs(struct ibmebus_driver *drv)
+{
+       driver_remove_file(&drv->driver, &driver_attr_debug_level);
+}
+
+#define EHCA_RESOURCE_ATTR(name)                                           \
+static ssize_t  ehca_show_##name(struct device *dev,                       \
+                                struct device_attribute *attr,            \
+                                char *buf)                                \
+{                                                                         \
+       struct ehca_shca *shca;                                            \
+       struct hipz_query_hca *rblock;                                     \
+       int data;                                                          \
+                                                                          \
+       shca = dev->driver_data;                                           \
+                                                                          \
+       rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);                      \
+       if (!rblock) {                                                     \
+               dev_err(dev, "Can't allocate rblock memory.");             \
+               return 0;                                                  \
+       }                                                                  \
+                                                                          \
+       if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \
+               dev_err(dev, "Can't query device properties");             \
+               kfree(rblock);                                             \
+               return 0;                                                  \
+       }                                                                  \
+                                                                          \
+       data = rblock->name;                                               \
+       kfree(rblock);                                                     \
+                                                                          \
+       if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1))     \
+               return snprintf(buf, PAGE_SIZE, "1\n");                    \
+       else                                                               \
+               return snprintf(buf, PAGE_SIZE, "%d\n", data);             \
+                                                                          \
+}                                                                         \
+static DEVICE_ATTR(name, S_IRUGO, ehca_show_##name, NULL);
+
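+/*
+ * Each invocation below generates an ehca_show_<name>() that reads the
+ * field <name> from a fresh hipz_query_hca block, plus the matching
+ * read-only dev_attr_<name> used in ehca_create_device_sysfs().
+ */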
+EHCA_RESOURCE_ATTR(num_ports);
+EHCA_RESOURCE_ATTR(hw_ver);
+EHCA_RESOURCE_ATTR(max_eq);
+EHCA_RESOURCE_ATTR(cur_eq);
+EHCA_RESOURCE_ATTR(max_cq);
+EHCA_RESOURCE_ATTR(cur_cq);
+EHCA_RESOURCE_ATTR(max_qp);
+EHCA_RESOURCE_ATTR(cur_qp);
+EHCA_RESOURCE_ATTR(max_mr);
+EHCA_RESOURCE_ATTR(cur_mr);
+EHCA_RESOURCE_ATTR(max_mw);
+EHCA_RESOURCE_ATTR(cur_mw);
+EHCA_RESOURCE_ATTR(max_pd);
+EHCA_RESOURCE_ATTR(max_ah);
+
+static ssize_t ehca_show_adapter_handle(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       struct ehca_shca *shca = dev->driver_data;
+
+	return sprintf(buf, "%lx\n", shca->ipz_hca_handle.handle);
+}
+static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
+
+void ehca_create_device_sysfs(struct ibmebus_dev *dev)
+{
+       device_create_file(&dev->ofdev.dev, &dev_attr_adapter_handle);
+       device_create_file(&dev->ofdev.dev, &dev_attr_num_ports);
+       device_create_file(&dev->ofdev.dev, &dev_attr_hw_ver);
+       device_create_file(&dev->ofdev.dev, &dev_attr_max_eq);
+       device_create_file(&dev->ofdev.dev, &dev_attr_cur_eq);
+       device_create_file(&dev->ofdev.dev, &dev_attr_max_cq);
+       device_create_file(&dev->ofdev.dev, &dev_attr_cur_cq);
+       device_create_file(&dev->ofdev.dev, &dev_attr_max_qp);
+       device_create_file(&dev->ofdev.dev, &dev_attr_cur_qp);
+       device_create_file(&dev->ofdev.dev, &dev_attr_max_mr);
+       device_create_file(&dev->ofdev.dev, &dev_attr_cur_mr);
+       device_create_file(&dev->ofdev.dev, &dev_attr_max_mw);
+       device_create_file(&dev->ofdev.dev, &dev_attr_cur_mw);
+       device_create_file(&dev->ofdev.dev, &dev_attr_max_pd);
+       device_create_file(&dev->ofdev.dev, &dev_attr_max_ah);
+}
+
+void ehca_remove_device_sysfs(struct ibmebus_dev *dev)
+{
+       device_remove_file(&dev->ofdev.dev, &dev_attr_adapter_handle);
+       device_remove_file(&dev->ofdev.dev, &dev_attr_num_ports);
+       device_remove_file(&dev->ofdev.dev, &dev_attr_hw_ver);
+       device_remove_file(&dev->ofdev.dev, &dev_attr_max_eq);
+       device_remove_file(&dev->ofdev.dev, &dev_attr_cur_eq);
+       device_remove_file(&dev->ofdev.dev, &dev_attr_max_cq);
+       device_remove_file(&dev->ofdev.dev, &dev_attr_cur_cq);
+       device_remove_file(&dev->ofdev.dev, &dev_attr_max_qp);
+       device_remove_file(&dev->ofdev.dev, &dev_attr_cur_qp);
+       device_remove_file(&dev->ofdev.dev, &dev_attr_max_mr);
+       device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mr);
+       device_remove_file(&dev->ofdev.dev, &dev_attr_max_mw);
+       device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mw);
+       device_remove_file(&dev->ofdev.dev, &dev_attr_max_pd);
+       device_remove_file(&dev->ofdev.dev, &dev_attr_max_ah);
+}
+
+static int __devinit ehca_probe(struct ibmebus_dev *dev,
+                               const struct of_device_id *id)
+{
+       struct ehca_shca *shca;
+       u64 *handle;
+       struct ib_pd *ibpd;
+       int ret;
+
+       handle = (u64 *)get_property(dev->ofdev.node, "ibm,hca-handle", NULL);
+       if (!handle) {
+               ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
+                            dev->ofdev.node->full_name);
+               return -ENODEV;
+       }
+
+       if (!(*handle)) {
+               ehca_gen_err("Wrong eHCA handle for adapter: %s.",
+                            dev->ofdev.node->full_name);
+               return -ENODEV;
+       }
+
+       shca = (struct ehca_shca *)ib_alloc_device(sizeof(*shca));
+       if (!shca) {
+               ehca_gen_err("Cannot allocate shca memory.");
+               return -ENOMEM;
+       }
+
+       shca->ibmebus_dev = dev;
+       shca->ipz_hca_handle.handle = *handle;
+       dev->ofdev.dev.driver_data = shca;
+
+       ret = ehca_sense_attributes(shca);
+       if (ret < 0) {
+               ehca_gen_err("Cannot sense eHCA attributes.");
+               goto probe1;
+       }
+
+       ret = ehca_register_device(shca);
+       if (ret) {
+               ehca_gen_err("Cannot register Infiniband device");
+               goto probe1;
+       }
+
+       /* create event queues */
+       ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, 2048);
+       if (ret) {
+               ehca_err(&shca->ib_device, "Cannot create EQ.");
+               goto probe2;
+       }
+
+       ret = ehca_create_eq(shca, &shca->neq, EHCA_NEQ, 513);
+       if (ret) {
+               ehca_err(&shca->ib_device, "Cannot create NEQ.");
+               goto probe3;
+       }
+
+       /* create internal protection domain */
+	ibpd = ehca_alloc_pd(&shca->ib_device, (void *)(-1), NULL);
+       if (IS_ERR(ibpd)) {
+               ehca_err(&shca->ib_device, "Cannot create internal PD.");
+               ret = PTR_ERR(ibpd);
+               goto probe4;
+       }
+
+       shca->pd = container_of(ibpd, struct ehca_pd, ib_pd);
+       shca->pd->ib_pd.device = &shca->ib_device;
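+	/* the internal PD backs the internal max-MR and the AQP1 QPs below */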
+
+       /* create internal max MR */
+	ret = ehca_reg_internal_maxmr(shca, shca->pd, &shca->maxmr);
+	if (ret) {
+               ehca_err(&shca->ib_device, "Cannot create internal MR ret=%x",
+                        ret);
+               goto probe5;
+       }
+
+       /* create AQP1 for port 1 */
+       if (ehca_open_aqp1 == 1) {
+               shca->sport[0].port_state = IB_PORT_DOWN;
+               ret = ehca_create_aqp1(shca, 1);
+               if (ret) {
+                       ehca_err(&shca->ib_device,
+                                "Cannot create AQP1 for port 1.");
+                       goto probe6;
+               }
+       }
+
+       /* create AQP1 for port 2 */
+       if ((ehca_open_aqp1 == 1) && (shca->num_ports == 2)) {
+               shca->sport[1].port_state = IB_PORT_DOWN;
+               ret = ehca_create_aqp1(shca, 2);
+               if (ret) {
+                       ehca_err(&shca->ib_device,
+                                "Cannot create AQP1 for port 2.");
+                       goto probe7;
+               }
+       }
+
+       ehca_create_device_sysfs(dev);
+
+       spin_lock(&shca_list_lock);
+       list_add(&shca->shca_list, &shca_list);
+       spin_unlock(&shca_list_lock);
+
+       return 0;
+
+probe7:
+       ret = ehca_destroy_aqp1(&shca->sport[0]);
+       if (ret)
+               ehca_err(&shca->ib_device,
+                        "Cannot destroy AQP1 for port 1. ret=%x", ret);
+
+probe6:
+       ret = ehca_dereg_internal_maxmr(shca);
+       if (ret)
+               ehca_err(&shca->ib_device,
+                        "Cannot destroy internal MR. ret=%x", ret);
+
+probe5:
+       ret = ehca_dealloc_pd(&shca->pd->ib_pd);
+       if (ret)
+               ehca_err(&shca->ib_device,
+                        "Cannot destroy internal PD. ret=%x", ret);
+
+probe4:
+       ret = ehca_destroy_eq(shca, &shca->neq);
+       if (ret)
+               ehca_err(&shca->ib_device,
+                        "Cannot destroy NEQ. ret=%x", ret);
+
+probe3:
+       ret = ehca_destroy_eq(shca, &shca->eq);
+       if (ret)
+               ehca_err(&shca->ib_device,
+                        "Cannot destroy EQ. ret=%x", ret);
+
+probe2:
+       ib_unregister_device(&shca->ib_device);
+
+probe1:
+       ib_dealloc_device(&shca->ib_device);
+
+       return -EINVAL;
+}
+
+static int __devexit ehca_remove(struct ibmebus_dev *dev)
+{
+       struct ehca_shca *shca = dev->ofdev.dev.driver_data;
+       int ret;
+
+       ehca_remove_device_sysfs(dev);
+
+       if (ehca_open_aqp1 == 1) {
+               int i;
+               for (i = 0; i < shca->num_ports; i++) {
+                       ret = ehca_destroy_aqp1(&shca->sport[i]);
+                       if (ret)
+				ehca_err(&shca->ib_device,
+					 "Cannot destroy AQP1 for port %x "
+					 "ret=%x", i, ret);
+               }
+       }
+
+       ib_unregister_device(&shca->ib_device);
+
+       ret = ehca_dereg_internal_maxmr(shca);
+       if (ret)
+               ehca_err(&shca->ib_device,
+                        "Cannot destroy internal MR. ret=%x", ret);
+
+       ret = ehca_dealloc_pd(&shca->pd->ib_pd);
+       if (ret)
+               ehca_err(&shca->ib_device,
+                        "Cannot destroy internal PD. ret=%x", ret);
+
+       ret = ehca_destroy_eq(shca, &shca->eq);
+       if (ret)
+               ehca_err(&shca->ib_device, "Cannot destroy EQ. ret=%x", ret);
+
+       ret = ehca_destroy_eq(shca, &shca->neq);
+       if (ret)
+               ehca_err(&shca->ib_device, "Canot destroy NEQ. ret=%x", ret);
+
+       ib_dealloc_device(&shca->ib_device);
+
+       spin_lock(&shca_list_lock);
+       list_del(&shca->shca_list);
+       spin_unlock(&shca_list_lock);
+
+       return ret;
+}
+
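+/* match logical HCA ("lhca") nodes in the Open Firmware device tree */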
+static struct of_device_id ehca_device_table[] = {
+       {
+               .name       = "lhca",
+               .compatible = "IBM,lhca",
+       },
+       {},
+};
+
+static struct ibmebus_driver ehca_driver = {
+       .name     = "ehca",
+       .id_table = ehca_device_table,
+       .probe    = ehca_probe,
+       .remove   = ehca_remove,
+};
+
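+/*
+ * Safety net against lost EQ interrupts: once per second run the EQ
+ * tasklet for every registered adapter (armed when ehca_poll_all_eqs
+ * is set in ehca_module_init()).
+ */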
+void ehca_poll_eqs(unsigned long data)
+{
+       struct ehca_shca *shca;
+
+       spin_lock(&shca_list_lock);
+       list_for_each_entry(shca, &shca_list, shca_list) {
+               if (shca->eq.is_initialized)
+			ehca_tasklet_eq((unsigned long)shca);
+       }
+       mod_timer(&poll_eqs_timer, jiffies + HZ);
+       spin_unlock(&shca_list_lock);
+}
+
+int __init ehca_module_init(void)
+{
+       int ret;
+
+       printk(KERN_INFO "eHCA Infiniband Device Driver "
+                        "(Rel.: SVNEHCA_0016)\n");
+       idr_init(&ehca_qp_idr);
+       idr_init(&ehca_cq_idr);
+       spin_lock_init(&ehca_qp_idr_lock);
+       spin_lock_init(&ehca_cq_idr_lock);
+
+       INIT_LIST_HEAD(&shca_list);
+       spin_lock_init(&shca_list_lock);
+
+	ret = ehca_create_comp_pool();
+	if (ret) {
+		ehca_gen_err("Cannot create comp pool.");
+		return ret;
+	}
+
+	ret = ehca_create_slab_caches();
+	if (ret) {
+		ehca_gen_err("Cannot create SLAB caches");
+		ret = -ENOMEM;
+		goto module_init1;
+	}
+
+	ret = ibmebus_register_driver(&ehca_driver);
+	if (ret) {
+		ehca_gen_err("Cannot register eHCA device driver");
+		ret = -EINVAL;
+		goto module_init2;
+	}
+
+       ehca_create_driver_sysfs(&ehca_driver);
+
+	if (ehca_poll_all_eqs != 1) {
+		ehca_gen_err("WARNING: EQ polling is disabled; "
+			     "interrupts may be lost.");
+	} else {
+               init_timer(&poll_eqs_timer);
+               poll_eqs_timer.function = ehca_poll_eqs;
+               poll_eqs_timer.expires = jiffies + HZ;
+               add_timer(&poll_eqs_timer);
+       }
+
+       return 0;
+
+module_init2:
+       ehca_destroy_slab_caches();
+
+module_init1:
+       ehca_destroy_comp_pool();
+       return ret;
+}
+
+void __exit ehca_module_exit(void)
+{
+       if (ehca_poll_all_eqs == 1)
+               del_timer_sync(&poll_eqs_timer);
+
+       ehca_remove_driver_sysfs(&ehca_driver);
+       ibmebus_unregister_driver(&ehca_driver);
+
+       ehca_destroy_slab_caches();
+
+       ehca_destroy_comp_pool();
+
+       idr_destroy(&ehca_cq_idr);
+       idr_destroy(&ehca_qp_idr);
+}
+
+module_init(ehca_module_init);
+module_exit(ehca_module_exit);
diff --git a/drivers/infiniband/hw/ehca/ehca_mcast.c b/drivers/infiniband/hw/ehca/ehca_mcast.c
new file mode 100644 (file)
index 0000000..32a8706
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+ *  IBM eServer eHCA InfiniBand device driver for Linux on POWER
+ *
+ *  multicast functions
+ *
+ *  Authors: Khadija Souissi <souissik@de.ibm.com>
+ *           Waleri Fomin <fomin@de.ibm.com>
+ *           Reinhard Ernst <rernst@de.ibm.com>
+ *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *           Heiko J Schick <schickhj@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include "ehca_classes.h"
+#include "ehca_tools.h"
+#include "ehca_qes.h"
+#include "ehca_iverbs.h"
+#include "hcp_if.h"
+
+#define MAX_MC_LID 0xFFFE
+#define MIN_MC_LID 0xC000      /* Multicast limits */
+#define EHCA_VALID_MULTICAST_GID(gid)  ((gid)[0] == 0xFF)
+#define EHCA_VALID_MULTICAST_LID(lid) \
+       (((lid) >= MIN_MC_LID) && ((lid) <= MAX_MC_LID))
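+/*
+ * Per the InfiniBand spec, multicast LIDs occupy 0xC000-0xFFFE and a
+ * multicast GID is marked by 0xFF in its top byte.
+ */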
+
+int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+       struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+       struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
+                                             ib_device);
+       union ib_gid my_gid;
+       u64 subnet_prefix, interface_id, h_ret;
+
+       if (ibqp->qp_type != IB_QPT_UD) {
+               ehca_err(ibqp->device, "invalid qp_type=%x", ibqp->qp_type);
+               return -EINVAL;
+       }
+
+       if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
+               ehca_err(ibqp->device, "invalid mulitcast gid");
+               return -EINVAL;
+       } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
+               ehca_err(ibqp->device, "invalid mulitcast lid=%x", lid);
+               return -EINVAL;
+       }
+
+       memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid));
+
+       subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
+       interface_id = be64_to_cpu(my_gid.global.interface_id);
+       h_ret = hipz_h_attach_mcqp(shca->ipz_hca_handle,
+                                  my_qp->ipz_qp_handle,
+                                  my_qp->galpas.kernel,
+                                  lid, subnet_prefix, interface_id);
+       if (h_ret != H_SUCCESS)
+               ehca_err(ibqp->device,
+                        "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed "
+                        "h_ret=%lx", my_qp, ibqp->qp_num, h_ret);
+
+       return ehca2ib_return_code(h_ret);
+}
+
+int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+       struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
+					      ib_device);
+       union ib_gid my_gid;
+       u64 subnet_prefix, interface_id, h_ret;
+
+       if (ibqp->qp_type != IB_QPT_UD) {
+               ehca_err(ibqp->device, "invalid qp_type %x", ibqp->qp_type);
+               return -EINVAL;
+       }
+
+       if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
+               ehca_err(ibqp->device, "invalid mulitcast gid");
+               return -EINVAL;
+       } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
+               ehca_err(ibqp->device, "invalid mulitcast lid=%x", lid);
+               return -EINVAL;
+       }
+
+       memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid));
+
+       subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
+       interface_id = be64_to_cpu(my_gid.global.interface_id);
+       h_ret = hipz_h_detach_mcqp(shca->ipz_hca_handle,
+                                  my_qp->ipz_qp_handle,
+                                  my_qp->galpas.kernel,
+                                  lid, subnet_prefix, interface_id);
+       if (h_ret != H_SUCCESS)
+               ehca_err(ibqp->device,
+                        "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
+                        "h_ret=%lx", my_qp, ibqp->qp_num, h_ret);
+
+       return ehca2ib_return_code(h_ret);
+}
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
new file mode 100644 (file)
index 0000000..5ca6544
--- /dev/null
@@ -0,0 +1,2261 @@
+/*
+ *  IBM eServer eHCA InfiniBand device driver for Linux on POWER
+ *
+ *  MR/MW functions
+ *
+ *  Authors: Dietmar Decker <ddecker@de.ibm.com>
+ *           Christoph Raisch <raisch@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/current.h>
+
+#include "ehca_iverbs.h"
+#include "ehca_mrmw.h"
+#include "hcp_if.h"
+#include "hipz_hw.h"
+
+static struct kmem_cache *mr_cache;
+static struct kmem_cache *mw_cache;
+
+static struct ehca_mr *ehca_mr_new(void)
+{
+       struct ehca_mr *me;
+
+       me = kmem_cache_alloc(mr_cache, SLAB_KERNEL);
+       if (me) {
+               memset(me, 0, sizeof(struct ehca_mr));
+               spin_lock_init(&me->mrlock);
+	} else {
+		ehca_gen_err("alloc failed");
+	}
+
+       return me;
+}
+
+static void ehca_mr_delete(struct ehca_mr *me)
+{
+       kmem_cache_free(mr_cache, me);
+}
+
+static struct ehca_mw *ehca_mw_new(void)
+{
+       struct ehca_mw *me;
+
+       me = kmem_cache_alloc(mw_cache, SLAB_KERNEL);
+       if (me) {
+               memset(me, 0, sizeof(struct ehca_mw));
+               spin_lock_init(&me->mwlock);
+	} else {
+		ehca_gen_err("alloc failed");
+	}
+
+       return me;
+}
+
+static void ehca_mw_delete(struct ehca_mw *me)
+{
+       kmem_cache_free(mw_cache, me);
+}
+
+/*----------------------------------------------------------------------*/
+
+struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
+{
+       struct ib_mr *ib_mr;
+       int ret;
+       struct ehca_mr *e_maxmr;
+       struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
+       struct ehca_shca *shca =
+               container_of(pd->device, struct ehca_shca, ib_device);
+
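+	/* DMA MRs are backed by the internal max-MR covering kernel space */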
+       if (shca->maxmr) {
+               e_maxmr = ehca_mr_new();
+               if (!e_maxmr) {
+                       ehca_err(&shca->ib_device, "out of memory");
+                       ib_mr = ERR_PTR(-ENOMEM);
+                       goto get_dma_mr_exit0;
+               }
+
+               ret = ehca_reg_maxmr(shca, e_maxmr, (u64*)KERNELBASE,
+                                    mr_access_flags, e_pd,
+                                    &e_maxmr->ib.ib_mr.lkey,
+                                    &e_maxmr->ib.ib_mr.rkey);
+               if (ret) {
+                       ib_mr = ERR_PTR(ret);
+                       goto get_dma_mr_exit0;
+               }
+               ib_mr = &e_maxmr->ib.ib_mr;
+       } else {
+               ehca_err(&shca->ib_device, "no internal max-MR exist!");
+               ib_mr = ERR_PTR(-EINVAL);
+               goto get_dma_mr_exit0;
+       }
+
+get_dma_mr_exit0:
+       if (IS_ERR(ib_mr))
+               ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x ",
+                        PTR_ERR(ib_mr), pd, mr_access_flags);
+       return ib_mr;
+} /* end ehca_get_dma_mr() */
+
+/*----------------------------------------------------------------------*/
+
+struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
+                              struct ib_phys_buf *phys_buf_array,
+                              int num_phys_buf,
+                              int mr_access_flags,
+                              u64 *iova_start)
+{
+       struct ib_mr *ib_mr;
+       int ret;
+       struct ehca_mr *e_mr;
+       struct ehca_shca *shca =
+               container_of(pd->device, struct ehca_shca, ib_device);
+       struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
+
+       u64 size;
+	struct ehca_mr_pginfo pginfo = {0};
+       u32 num_pages_mr;
+       u32 num_pages_4k; /* 4k portion "pages" */
+
+       if ((num_phys_buf <= 0) || !phys_buf_array) {
+               ehca_err(pd->device, "bad input values: num_phys_buf=%x "
+                        "phys_buf_array=%p", num_phys_buf, phys_buf_array);
+               ib_mr = ERR_PTR(-EINVAL);
+               goto reg_phys_mr_exit0;
+       }
+       if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
+            !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
+           ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
+            !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
+               /*
+                * Remote Write Access requires Local Write Access
+                * Remote Atomic Access requires Local Write Access
+                */
+               ehca_err(pd->device, "bad input values: mr_access_flags=%x",
+                        mr_access_flags);
+               ib_mr = ERR_PTR(-EINVAL);
+               goto reg_phys_mr_exit0;
+       }
+
+       /* check physical buffer list and calculate size */
+       ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
+                                           iova_start, &size);
+       if (ret) {
+               ib_mr = ERR_PTR(ret);
+               goto reg_phys_mr_exit0;
+       }
+       if ((size == 0) ||
+           (((u64)iova_start + size) < (u64)iova_start)) {
+               ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
+                        size, iova_start);
+               ib_mr = ERR_PTR(-EINVAL);
+               goto reg_phys_mr_exit0;
+       }
+
+       e_mr = ehca_mr_new();
+       if (!e_mr) {
+               ehca_err(pd->device, "out of memory");
+               ib_mr = ERR_PTR(-ENOMEM);
+               goto reg_phys_mr_exit0;
+       }
+
+       /* determine number of MR pages */
+       num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size +
+                        PAGE_SIZE - 1) / PAGE_SIZE);
+       num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size +
+                        EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
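+	/*
+	 * e.g. a 16KB buffer starting 1KB into a 4KB page needs
+	 * (1024 + 16384 + 4095) / 4096 = 5 low-level pages
+	 */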
+
+       /* register MR on HCA */
+       if (ehca_mr_is_maxmr(size, iova_start)) {
+               e_mr->flags |= EHCA_MR_FLAG_MAXMR;
+               ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
+                                    e_pd, &e_mr->ib.ib_mr.lkey,
+                                    &e_mr->ib.ib_mr.rkey);
+               if (ret) {
+                       ib_mr = ERR_PTR(ret);
+                       goto reg_phys_mr_exit1;
+               }
+       } else {
+               pginfo.type           = EHCA_MR_PGI_PHYS;
+               pginfo.num_pages      = num_pages_mr;
+               pginfo.num_4k         = num_pages_4k;
+               pginfo.num_phys_buf   = num_phys_buf;
+               pginfo.phys_buf_array = phys_buf_array;
+               pginfo.next_4k        = (((u64)iova_start & ~PAGE_MASK) /
+                                        EHCA_PAGESIZE);
+
+               ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
+                                 e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
+                                 &e_mr->ib.ib_mr.rkey);
+               if (ret) {
+                       ib_mr = ERR_PTR(ret);
+                       goto reg_phys_mr_exit1;
+               }
+       }
+
+       /* successful registration of all pages */
+       return &e_mr->ib.ib_mr;
+
+reg_phys_mr_exit1:
+       ehca_mr_delete(e_mr);
+reg_phys_mr_exit0:
+       if (IS_ERR(ib_mr))
+               ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
+                        "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
+                        PTR_ERR(ib_mr), pd, phys_buf_array,
+                        num_phys_buf, mr_access_flags, iova_start);
+       return ib_mr;
+} /* end ehca_reg_phys_mr() */
+
+/*----------------------------------------------------------------------*/
+
+struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd,
+                              struct ib_umem *region,
+                              int mr_access_flags,
+                              struct ib_udata *udata)
+{
+	struct ib_mr *ib_mr;
+	struct ehca_mr *e_mr;
+	struct ehca_shca *shca;
+	struct ehca_pd *e_pd;
+	struct ehca_mr_pginfo pginfo = {0};
+	int ret;
+	u32 num_pages_mr;
+	u32 num_pages_4k; /* 4k portion "pages" */
+
+	if (!pd) {
+		ehca_gen_err("bad pd=%p", pd);
+		return ERR_PTR(-EFAULT);
+	}
+
+	/* only dereference pd once the NULL check above has passed */
+	shca = container_of(pd->device, struct ehca_shca, ib_device);
+	e_pd = container_of(pd, struct ehca_pd, ib_pd);
+
+       if (!region) {
+               ehca_err(pd->device, "bad input values: region=%p", region);
+               ib_mr = ERR_PTR(-EINVAL);
+               goto reg_user_mr_exit0;
+       }
+       if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
+            !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
+           ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
+            !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
+               /*
+                * Remote Write Access requires Local Write Access
+                * Remote Atomic Access requires Local Write Access
+                */
+               ehca_err(pd->device, "bad input values: mr_access_flags=%x",
+                        mr_access_flags);
+               ib_mr = ERR_PTR(-EINVAL);
+               goto reg_user_mr_exit0;
+       }
+       if (region->page_size != PAGE_SIZE) {
+               ehca_err(pd->device, "page size not supported, "
+                        "region->page_size=%x", region->page_size);
+               ib_mr = ERR_PTR(-EINVAL);
+               goto reg_user_mr_exit0;
+       }
+
+       if ((region->length == 0) ||
+           ((region->virt_base + region->length) < region->virt_base)) {
+               ehca_err(pd->device, "bad input values: length=%lx "
+                        "virt_base=%lx", region->length, region->virt_base);
+               ib_mr = ERR_PTR(-EINVAL);
+               goto reg_user_mr_exit0;
+       }
+
+       e_mr = ehca_mr_new();
+       if (!e_mr) {
+               ehca_err(pd->device, "out of memory");
+               ib_mr = ERR_PTR(-ENOMEM);
+               goto reg_user_mr_exit0;
+       }
+
+       /* determine number of MR pages */
+       num_pages_mr = (((region->virt_base % PAGE_SIZE) + region->length +
+                        PAGE_SIZE - 1) / PAGE_SIZE);
+       num_pages_4k = (((region->virt_base % EHCA_PAGESIZE) + region->length +
+                        EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
+
+       /* register MR on HCA */
+       pginfo.type       = EHCA_MR_PGI_USER;
+       pginfo.num_pages  = num_pages_mr;
+       pginfo.num_4k     = num_pages_4k;
+       pginfo.region     = region;
+       pginfo.next_4k    = region->offset / EHCA_PAGESIZE;
+       pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk,
+                                              (&region->chunk_list),
+                                              list);
+
+       ret = ehca_reg_mr(shca, e_mr, (u64*)region->virt_base,
+                         region->length, mr_access_flags, e_pd, &pginfo,
+                         &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
+       if (ret) {
+               ib_mr = ERR_PTR(ret);
+               goto reg_user_mr_exit1;
+       }
+
+       /* successful registration of all pages */
+       return &e_mr->ib.ib_mr;
+
+reg_user_mr_exit1:
+       ehca_mr_delete(e_mr);
+reg_user_mr_exit0:
+       if (IS_ERR(ib_mr))
+               ehca_err(pd->device, "rc=%lx pd=%p region=%p mr_access_flags=%x"
+                        " udata=%p",
+                        PTR_ERR(ib_mr), pd, region, mr_access_flags, udata);
+       return ib_mr;
+} /* end ehca_reg_user_mr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_rereg_phys_mr(struct ib_mr *mr,
+                      int mr_rereg_mask,
+                      struct ib_pd *pd,
+                      struct ib_phys_buf *phys_buf_array,
+                      int num_phys_buf,
+                      int mr_access_flags,
+                      u64 *iova_start)
+{
+       int ret;
+
+       struct ehca_shca *shca =
+               container_of(mr->device, struct ehca_shca, ib_device);
+       struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
+       struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
+       u64 new_size;
+       u64 *new_start;
+       u32 new_acl;
+       struct ehca_pd *new_pd;
+       u32 tmp_lkey, tmp_rkey;
+       unsigned long sl_flags;
+       u32 num_pages_mr = 0;
+       u32 num_pages_4k = 0; /* 4k portion "pages" */
+	struct ehca_mr_pginfo pginfo = {0};
+       u32 cur_pid = current->tgid;
+
+       if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+           (my_pd->ownpid != cur_pid)) {
+               ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
+                        cur_pid, my_pd->ownpid);
+               ret = -EINVAL;
+               goto rereg_phys_mr_exit0;
+       }
+
+       if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
+               /* TODO not supported, because PHYP rereg hCall needs pages */
+               ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
+                        "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
+               ret = -EINVAL;
+               goto rereg_phys_mr_exit0;
+       }
+
+       if (mr_rereg_mask & IB_MR_REREG_PD) {
+               if (!pd) {
+                       ehca_err(mr->device, "rereg with bad pd, pd=%p "
+                                "mr_rereg_mask=%x", pd, mr_rereg_mask);
+                       ret = -EINVAL;
+                       goto rereg_phys_mr_exit0;
+               }
+       }
+
+       if ((mr_rereg_mask &
+            ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
+           (mr_rereg_mask == 0)) {
+               ret = -EINVAL;
+               goto rereg_phys_mr_exit0;
+       }
+
+       /* check other parameters */
+       if (e_mr == shca->maxmr) {
+               /* should be impossible, however reject to be sure */
+               ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
+                        "shca->maxmr=%p mr->lkey=%x",
+                        mr, shca->maxmr, mr->lkey);
+               ret = -EINVAL;
+               goto rereg_phys_mr_exit0;
+       }
+       if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
+               if (e_mr->flags & EHCA_MR_FLAG_FMR) {
+                       ehca_err(mr->device, "not supported for FMR, mr=%p "
+                                "flags=%x", mr, e_mr->flags);
+                       ret = -EINVAL;
+                       goto rereg_phys_mr_exit0;
+               }
+               if (!phys_buf_array || num_phys_buf <= 0) {
+                       ehca_err(mr->device, "bad input values: mr_rereg_mask=%x"
+                                " phys_buf_array=%p num_phys_buf=%x",
+                                mr_rereg_mask, phys_buf_array, num_phys_buf);
+                       ret = -EINVAL;
+                       goto rereg_phys_mr_exit0;
+               }
+       }
+       if ((mr_rereg_mask & IB_MR_REREG_ACCESS) &&     /* change ACL */
+           (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
+             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
+            ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
+             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
+               /*
+                * Remote Write Access requires Local Write Access
+                * Remote Atomic Access requires Local Write Access
+                */
+               ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
+                        "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
+               ret = -EINVAL;
+               goto rereg_phys_mr_exit0;
+       }
+
+       /* set requested values dependent on rereg request */
+       spin_lock_irqsave(&e_mr->mrlock, sl_flags);
+       new_start = e_mr->start;  /* new == old address */
+       new_size  = e_mr->size;   /* new == old length */
+       new_acl   = e_mr->acl;    /* new == old access control */
+	/* new == old PD */
+	new_pd    = container_of(mr->pd, struct ehca_pd, ib_pd);
+
+       if (mr_rereg_mask & IB_MR_REREG_TRANS) {
+               new_start = iova_start; /* change address */
+               /* check physical buffer list and calculate size */
+               ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
+                                                   num_phys_buf, iova_start,
+                                                   &new_size);
+               if (ret)
+                       goto rereg_phys_mr_exit1;
+               if ((new_size == 0) ||
+                   (((u64)iova_start + new_size) < (u64)iova_start)) {
+                       ehca_err(mr->device, "bad input values: new_size=%lx "
+                                "iova_start=%p", new_size, iova_start);
+                       ret = -EINVAL;
+                       goto rereg_phys_mr_exit1;
+               }
+               num_pages_mr = ((((u64)new_start % PAGE_SIZE) + new_size +
+                                PAGE_SIZE - 1) / PAGE_SIZE);
+               num_pages_4k = ((((u64)new_start % EHCA_PAGESIZE) + new_size +
+                                EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
+               pginfo.type           = EHCA_MR_PGI_PHYS;
+               pginfo.num_pages      = num_pages_mr;
+               pginfo.num_4k         = num_pages_4k;
+               pginfo.num_phys_buf   = num_phys_buf;
+               pginfo.phys_buf_array = phys_buf_array;
+               pginfo.next_4k        = (((u64)iova_start & ~PAGE_MASK) /
+                                        EHCA_PAGESIZE);
+       }
+       if (mr_rereg_mask & IB_MR_REREG_ACCESS)
+               new_acl = mr_access_flags;
+       if (mr_rereg_mask & IB_MR_REREG_PD)
+               new_pd = container_of(pd, struct ehca_pd, ib_pd);
+
+       ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
+                           new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
+       if (ret)
+               goto rereg_phys_mr_exit1;
+
+       /* successful reregistration */
+       if (mr_rereg_mask & IB_MR_REREG_PD)
+               mr->pd = pd;
+       mr->lkey = tmp_lkey;
+       mr->rkey = tmp_rkey;
+
+rereg_phys_mr_exit1:
+       spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
+rereg_phys_mr_exit0:
+       if (ret)
+               ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
+                        "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
+                        "iova_start=%p",
+                        ret, mr, mr_rereg_mask, pd, phys_buf_array,
+                        num_phys_buf, mr_access_flags, iova_start);
+       return ret;
+} /* end ehca_rereg_phys_mr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
+{
+       int ret = 0;
+       u64 h_ret;
+       struct ehca_shca *shca =
+               container_of(mr->device, struct ehca_shca, ib_device);
+       struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
+       struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
+       u32 cur_pid = current->tgid;
+       unsigned long sl_flags;
+	struct ehca_mr_hipzout_parms hipzout = {{0}};
+
+       if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+           (my_pd->ownpid != cur_pid)) {
+               ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
+                        cur_pid, my_pd->ownpid);
+               ret = -EINVAL;
+               goto query_mr_exit0;
+       }
+
+       if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
+               ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
+                        "e_mr->flags=%x", mr, e_mr, e_mr->flags);
+               ret = -EINVAL;
+               goto query_mr_exit0;
+       }
+
+       memset(mr_attr, 0, sizeof(struct ib_mr_attr));
+       spin_lock_irqsave(&e_mr->mrlock, sl_flags);
+
+       h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
+       if (h_ret != H_SUCCESS) {
+               ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p "
+                        "hca_hndl=%lx mr_hndl=%lx lkey=%x",
+                        h_ret, mr, shca->ipz_hca_handle.handle,
+                        e_mr->ipz_mr_handle.handle, mr->lkey);
+               ret = ehca_mrmw_map_hrc_query_mr(h_ret);
+               goto query_mr_exit1;
+       }
+       mr_attr->pd               = mr->pd;
+       mr_attr->device_virt_addr = hipzout.vaddr;
+       mr_attr->size             = hipzout.len;
+       mr_attr->lkey             = hipzout.lkey;
+       mr_attr->rkey             = hipzout.rkey;
+       ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);
+
+query_mr_exit1:
+       spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
+query_mr_exit0:
+       if (ret)
+               ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
+                        ret, mr, mr_attr);
+       return ret;
+} /* end ehca_query_mr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_dereg_mr(struct ib_mr *mr)
+{
+       int ret = 0;
+       u64 h_ret;
+       struct ehca_shca *shca =
+               container_of(mr->device, struct ehca_shca, ib_device);
+       struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
+       struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
+       u32 cur_pid = current->tgid;
+
+       if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+           (my_pd->ownpid != cur_pid)) {
+               ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
+                        cur_pid, my_pd->ownpid);
+               ret = -EINVAL;
+               goto dereg_mr_exit0;
+       }
+
+       if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
+               ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
+                        "e_mr->flags=%x", mr, e_mr, e_mr->flags);
+               ret = -EINVAL;
+               goto dereg_mr_exit0;
+       } else if (e_mr == shca->maxmr) {
+               /* should be impossible, however reject to be sure */
+               ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
+                        "shca->maxmr=%p mr->lkey=%x",
+                        mr, shca->maxmr, mr->lkey);
+               ret = -EINVAL;
+               goto dereg_mr_exit0;
+       }
+
+       /* TODO: BUSY: MR still has bound window(s) */
+       h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
+       if (h_ret != H_SUCCESS) {
+               ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p "
+                        "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
+                        h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
+                        e_mr->ipz_mr_handle.handle, mr->lkey);
+               ret = ehca_mrmw_map_hrc_free_mr(h_ret);
+               goto dereg_mr_exit0;
+       }
+
+       /* successful deregistration */
+       ehca_mr_delete(e_mr);
+
+dereg_mr_exit0:
+       if (ret)
+               ehca_err(mr->device, "ret=%x mr=%p", ret, mr);
+       return ret;
+} /* end ehca_dereg_mr() */
+
+/*----------------------------------------------------------------------*/
+
+struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
+{
+       struct ib_mw *ib_mw;
+       u64 h_ret;
+       struct ehca_mw *e_mw;
+       struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
+       struct ehca_shca *shca =
+               container_of(pd->device, struct ehca_shca, ib_device);
+	struct ehca_mw_hipzout_parms hipzout = {{0}};
+
+       e_mw = ehca_mw_new();
+       if (!e_mw) {
+               ib_mw = ERR_PTR(-ENOMEM);
+               goto alloc_mw_exit0;
+       }
+
+       h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
+                                        e_pd->fw_pd, &hipzout);
+       if (h_ret != H_SUCCESS) {
+               ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
+                        "shca=%p hca_hndl=%lx mw=%p",
+                        h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
+               ib_mw = ERR_PTR(ehca_mrmw_map_hrc_alloc(h_ret));
+               goto alloc_mw_exit1;
+       }
+       /* successful MW allocation */
+       e_mw->ipz_mw_handle = hipzout.handle;
+       e_mw->ib_mw.rkey    = hipzout.rkey;
+       return &e_mw->ib_mw;
+
+alloc_mw_exit1:
+       ehca_mw_delete(e_mw);
+alloc_mw_exit0:
+       if (IS_ERR(ib_mw))
+               ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
+       return ib_mw;
+} /* end ehca_alloc_mw() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_bind_mw(struct ib_qp *qp,
+                struct ib_mw *mw,
+                struct ib_mw_bind *mw_bind)
+{
+       /* TODO: not supported up to now */
+       ehca_gen_err("bind MW currently not supported by HCAD");
+
+       return -EPERM;
+} /* end ehca_bind_mw() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_dealloc_mw(struct ib_mw *mw)
+{
+       u64 h_ret;
+       struct ehca_shca *shca =
+               container_of(mw->device, struct ehca_shca, ib_device);
+       struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);
+
+       h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
+       if (h_ret != H_SUCCESS) {
+               ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p "
+                        "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
+                        h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
+                        e_mw->ipz_mw_handle.handle);
+               return ehca_mrmw_map_hrc_free_mw(h_ret);
+       }
+       /* successful deallocation */
+       ehca_mw_delete(e_mw);
+       return 0;
+} /* end ehca_dealloc_mw() */
+
+/*----------------------------------------------------------------------*/
+
+struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
+                             int mr_access_flags,
+                             struct ib_fmr_attr *fmr_attr)
+{
+       struct ib_fmr *ib_fmr;
+       struct ehca_shca *shca =
+               container_of(pd->device, struct ehca_shca, ib_device);
+       struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
+       struct ehca_mr *e_fmr;
+       int ret;
+       u32 tmp_lkey, tmp_rkey;
+	struct ehca_mr_pginfo pginfo = {0};
+
+       /* check other parameters */
+       if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
+            !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
+           ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
+            !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
+               /*
+                * Remote Write Access requires Local Write Access
+                * Remote Atomic Access requires Local Write Access
+                */
+               ehca_err(pd->device, "bad input values: mr_access_flags=%x",
+                        mr_access_flags);
+               ib_fmr = ERR_PTR(-EINVAL);
+               goto alloc_fmr_exit0;
+       }
+       if (mr_access_flags & IB_ACCESS_MW_BIND) {
+               ehca_err(pd->device, "bad input values: mr_access_flags=%x",
+                        mr_access_flags);
+               ib_fmr = ERR_PTR(-EINVAL);
+               goto alloc_fmr_exit0;
+       }
+       if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
+               ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
+                        "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
+                        fmr_attr->max_pages, fmr_attr->max_maps,
+                        fmr_attr->page_shift);
+               ib_fmr = ERR_PTR(-EINVAL);
+               goto alloc_fmr_exit0;
+       }
+       if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
+           ((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
+               ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
+                        fmr_attr->page_shift);
+               ib_fmr = ERR_PTR(-EINVAL);
+               goto alloc_fmr_exit0;
+       }
+
+       e_fmr = ehca_mr_new();
+       if (!e_fmr) {
+               ib_fmr = ERR_PTR(-ENOMEM);
+               goto alloc_fmr_exit0;
+       }
+       e_fmr->flags |= EHCA_MR_FLAG_FMR;
+
+       /* register MR on HCA */
+       ret = ehca_reg_mr(shca, e_fmr, NULL,
+                         fmr_attr->max_pages * (1 << fmr_attr->page_shift),
+                         mr_access_flags, e_pd, &pginfo,
+                         &tmp_lkey, &tmp_rkey);
+       if (ret) {
+               ib_fmr = ERR_PTR(ret);
+               goto alloc_fmr_exit1;
+       }
+
+       /* successful */
+       e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
+       e_fmr->fmr_max_pages = fmr_attr->max_pages;
+       e_fmr->fmr_max_maps = fmr_attr->max_maps;
+       e_fmr->fmr_map_cnt = 0;
+       return &e_fmr->ib.ib_fmr;
+
+alloc_fmr_exit1:
+       ehca_mr_delete(e_fmr);
+alloc_fmr_exit0:
+       if (IS_ERR(ib_fmr))
+               ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
+                        "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
+                        mr_access_flags, fmr_attr);
+       return ib_fmr;
+} /* end ehca_alloc_fmr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_map_phys_fmr(struct ib_fmr *fmr,
+                     u64 *page_list,
+                     int list_len,
+                     u64 iova)
+{
+       int ret;
+       struct ehca_shca *shca =
+               container_of(fmr->device, struct ehca_shca, ib_device);
+       struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
+       struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
+       struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
+       u32 tmp_lkey, tmp_rkey;
+
+       if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
+               ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
+                        e_fmr, e_fmr->flags);
+               ret = -EINVAL;
+               goto map_phys_fmr_exit0;
+       }
+       ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
+       if (ret)
+               goto map_phys_fmr_exit0;
+       if (iova % e_fmr->fmr_page_size) {
+               /* iova must be aligned to the FMR page size */
+               ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
+                        iova, e_fmr->fmr_page_size);
+               ret = -EINVAL;
+               goto map_phys_fmr_exit0;
+       }
+       if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
+               /* HCAD does not enforce the map limit, but trace it anyway */
+               ehca_info(fmr->device, "map limit exceeded, fmr=%p "
+                         "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
+                         fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
+       }
+
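+       /*
+        * set up pginfo for the rereg hCall: num_4k is the number of 4K
+        * HCA pages covering list_len FMR pages; next_4k is the 4K offset
+        * of iova within its FMR page (zero here, since iova alignment was
+        * checked above)
+        */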
+       pginfo.type      = EHCA_MR_PGI_FMR;
+       pginfo.num_pages = list_len;
+       pginfo.num_4k    = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
+       pginfo.page_list = page_list;
+       pginfo.next_4k   = ((iova & (e_fmr->fmr_page_size-1)) /
+                           EHCA_PAGESIZE);
+
+       ret = ehca_rereg_mr(shca, e_fmr, (u64*)iova,
+                           list_len * e_fmr->fmr_page_size,
+                           e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
+       if (ret)
+               goto map_phys_fmr_exit0;
+
+       /* successful reregistration */
+       e_fmr->fmr_map_cnt++;
+       e_fmr->ib.ib_fmr.lkey = tmp_lkey;
+       e_fmr->ib.ib_fmr.rkey = tmp_rkey;
+       return 0;
+
+map_phys_fmr_exit0:
+       if (ret)
+               ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
+                        "iova=%lx",
+                        ret, fmr, page_list, list_len, iova);
+       return ret;
+} /* end ehca_map_phys_fmr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_unmap_fmr(struct list_head *fmr_list)
+{
+       int ret = 0;
+       struct ib_fmr *ib_fmr;
+       struct ehca_shca *shca = NULL;
+       struct ehca_shca *prev_shca;
+       struct ehca_mr *e_fmr;
+       u32 num_fmr = 0;
+       u32 unmap_fmr_cnt = 0;
+
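+       /*
+        * two passes over the list: first validate that every entry is an
+        * FMR on the same SHCA, then unmap each one, stopping at the first
+        * failure
+        */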
+       /* check all FMRs belong to same SHCA, and check internal FMR flag */
+       list_for_each_entry(ib_fmr, fmr_list, list) {
+               prev_shca = shca;
+               if (!ib_fmr) {
+                       ehca_gen_err("bad fmr=%p in list", ib_fmr);
+                       ret = -EINVAL;
+                       goto unmap_fmr_exit0;
+               }
+               shca = container_of(ib_fmr->device, struct ehca_shca,
+                                   ib_device);
+               e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
+               if ((shca != prev_shca) && prev_shca) {
+                       ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
+                                "prev_shca=%p e_fmr=%p",
+                                shca, prev_shca, e_fmr);
+                       ret = -EINVAL;
+                       goto unmap_fmr_exit0;
+               }
+               if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
+                       ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
+                                "e_fmr->flags=%x", e_fmr, e_fmr->flags);
+                       ret = -EINVAL;
+                       goto unmap_fmr_exit0;
+               }
+               num_fmr++;
+       }
+
+       /* loop over all FMRs to unmap */
+       list_for_each_entry(ib_fmr, fmr_list, list) {
+               unmap_fmr_cnt++;
+               e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
+               shca = container_of(ib_fmr->device, struct ehca_shca,
+                                   ib_device);
+               ret = ehca_unmap_one_fmr(shca, e_fmr);
+               if (ret) {
+                       /* unmap failed, stop unmapping of rest of FMRs */
+                       ehca_err(&shca->ib_device, "unmap of one FMR failed, "
+                                "stop rest, e_fmr=%p num_fmr=%x "
+                                "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
+                                unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
+                       goto unmap_fmr_exit0;
+               }
+       }
+
+unmap_fmr_exit0:
+       if (ret)
+               ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
+                            ret, fmr_list, num_fmr, unmap_fmr_cnt);
+       return ret;
+} /* end ehca_unmap_fmr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_dealloc_fmr(struct ib_fmr *fmr)
+{
+       int ret;
+       u64 h_ret;
+       struct ehca_shca *shca =
+               container_of(fmr->device, struct ehca_shca, ib_device);
+       struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
+
+       if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
+               ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
+                        e_fmr, e_fmr->flags);
+               ret = -EINVAL;
+               goto free_fmr_exit0;
+       }
+
+       h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
+       if (h_ret != H_SUCCESS) {
+               ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
+                        "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
+                        h_ret, e_fmr, shca->ipz_hca_handle.handle,
+                        e_fmr->ipz_mr_handle.handle, fmr->lkey);
+               ret = ehca_mrmw_map_hrc_free_mr(h_ret);
+               goto free_fmr_exit0;
+       }
+       /* successful deregistration */
+       ehca_mr_delete(e_fmr);
+       return 0;
+
+free_fmr_exit0:
+       if (ret)
+               ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr);
+       return ret;
+} /* end ehca_dealloc_fmr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_reg_mr(struct ehca_shca *shca,
+               struct ehca_mr *e_mr,
+               u64 *iova_start,
+               u64 size,
+               int acl,
+               struct ehca_pd *e_pd,
+               struct ehca_mr_pginfo *pginfo,
+               u32 *lkey, /*OUT*/
+               u32 *rkey) /*OUT*/
+{
+       int ret;
+       u64 h_ret;
+       u32 hipz_acl;
+       struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+
+       ehca_mrmw_map_acl(acl, &hipz_acl);
+       ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
+       if (ehca_use_hp_mr == 1)
+               hipz_acl |= 0x00000001;
+
+       h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
+                                        (u64)iova_start, size, hipz_acl,
+                                        e_pd->fw_pd, &hipzout);
+       if (h_ret != H_SUCCESS) {
+               ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
+                        "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
+               ret = ehca_mrmw_map_hrc_alloc(h_ret);
+               goto ehca_reg_mr_exit0;
+       }
+
+       e_mr->ipz_mr_handle = hipzout.handle;
+
+       ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
+       if (ret)
+               goto ehca_reg_mr_exit1;
+
+       /* successful registration */
+       e_mr->num_pages = pginfo->num_pages;
+       e_mr->num_4k    = pginfo->num_4k;
+       e_mr->start     = iova_start;
+       e_mr->size      = size;
+       e_mr->acl       = acl;
+       *lkey = hipzout.lkey;
+       *rkey = hipzout.rkey;
+       return 0;
+
+ehca_reg_mr_exit1:
+       h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
+       if (h_ret != H_SUCCESS) {
+               ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
+                        "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
+                        "pginfo=%p num_pages=%lx num_4k=%lx ret=%x",
+                        h_ret, shca, e_mr, iova_start, size, acl, e_pd,
+                        hipzout.lkey, pginfo, pginfo->num_pages,
+                        pginfo->num_4k, ret);
+               ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
+                        "not recoverable");
+       }
+ehca_reg_mr_exit0:
+       if (ret)
+               ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
+                        "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
+                        "num_pages=%lx num_4k=%lx",
+                        ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
+                        pginfo->num_pages, pginfo->num_4k);
+       return ret;
+} /* end ehca_reg_mr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_reg_mr_rpages(struct ehca_shca *shca,
+                      struct ehca_mr *e_mr,
+                      struct ehca_mr_pginfo *pginfo)
+{
+       int ret = 0;
+       u64 h_ret;
+       u32 rnum;
+       u64 rpage;
+       u32 i;
+       u64 *kpage;
+
+       kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+       if (!kpage) {
+               ehca_err(&shca->ib_device, "kpage alloc failed");
+               ret = -ENOMEM;
+               goto ehca_reg_mr_rpages_exit0;
+       }
+
+       /* max 512 pages per shot */
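+       /*
+        * kpage is one H_CB_ALIGNMENT-sized kernel buffer used as the rpage
+        * list; each shot passes up to 512 64-bit page addresses through it
+        */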
+       for (i = 0; i < ((pginfo->num_4k + 512 - 1) / 512); i++) {
+
+               if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
+                       rnum = pginfo->num_4k % 512; /* last shot */
+                       if (rnum == 0)
+                               rnum = 512;      /* last shot is full */
+               } else
+                       rnum = 512;
+
+               if (rnum > 1) {
+                       ret = ehca_set_pagebuf(e_mr, pginfo, rnum, kpage);
+                       if (ret) {
+                               ehca_err(&shca->ib_device, "ehca_set_pagebuf "
+                                        "bad rc, ret=%x rnum=%x kpage=%p",
+                                        ret, rnum, kpage);
+                               ret = -EFAULT;
+                               goto ehca_reg_mr_rpages_exit1;
+                       }
+                       rpage = virt_to_abs(kpage);
+                       if (!rpage) {
+                               ehca_err(&shca->ib_device, "kpage=%p i=%x",
+                                        kpage, i);
+                               ret = -EFAULT;
+                               goto ehca_reg_mr_rpages_exit1;
+                       }
+               } else {  /* rnum==1 */
+                       ret = ehca_set_pagebuf_1(e_mr, pginfo, &rpage);
+                       if (ret) {
+                               ehca_err(&shca->ib_device, "ehca_set_pagebuf_1 "
+                                        "bad rc, ret=%x i=%x", ret, i);
+                               ret = -EFAULT;
+                               goto ehca_reg_mr_rpages_exit1;
+                       }
+               }
+
+               h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr,
+                                                0, /* pagesize 4k */
+                                                0, rpage, rnum);
+
+               if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
+                       /*
+                        * check for 'registration complete'==H_SUCCESS
+                        * and for 'page registered'==H_PAGE_REGISTERED
+                        */
+                       if (h_ret != H_SUCCESS) {
+                               ehca_err(&shca->ib_device, "last "
+                                        "hipz_reg_rpage_mr failed, h_ret=%lx "
+                                        "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
+                                        " lkey=%x", h_ret, e_mr, i,
+                                        shca->ipz_hca_handle.handle,
+                                        e_mr->ipz_mr_handle.handle,
+                                        e_mr->ib.ib_mr.lkey);
+                               ret = ehca_mrmw_map_hrc_rrpg_last(h_ret);
+                               break;
+                       } else
+                               ret = 0;
+               } else if (h_ret != H_PAGE_REGISTERED) {
+                       ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
+                                "h_ret=%lx e_mr=%p i=%x lkey=%x hca_hndl=%lx "
+                                "mr_hndl=%lx", h_ret, e_mr, i,
+                                e_mr->ib.ib_mr.lkey,
+                                shca->ipz_hca_handle.handle,
+                                e_mr->ipz_mr_handle.handle);
+                       ret = ehca_mrmw_map_hrc_rrpg_notlast(h_ret);
+                       break;
+               } else
+                       ret = 0;
+       } /* end for(i) */
+
+ehca_reg_mr_rpages_exit1:
+       kfree(kpage);
+ehca_reg_mr_rpages_exit0:
+       if (ret)
+               ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
+                        "num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo,
+                        pginfo->num_pages, pginfo->num_4k);
+       return ret;
+} /* end ehca_reg_mr_rpages() */
+
+/*----------------------------------------------------------------------*/
+
+inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
+                               struct ehca_mr *e_mr,
+                               u64 *iova_start,
+                               u64 size,
+                               u32 acl,
+                               struct ehca_pd *e_pd,
+                               struct ehca_mr_pginfo *pginfo,
+                               u32 *lkey, /*OUT*/
+                               u32 *rkey) /*OUT*/
+{
+       int ret;
+       u64 h_ret;
+       u32 hipz_acl;
+       u64 *kpage;
+       u64 rpage;
+       struct ehca_mr_pginfo pginfo_save;
+       struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+
+       ehca_mrmw_map_acl(acl, &hipz_acl);
+       ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
+
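+       /*
+        * Rereg1: gather the complete rpage list into one kpage buffer and
+        * reregister with a single hipz_h_reregister_pmr hCall; pginfo is
+        * saved beforehand so that on failure (-EAGAIN) the caller can
+        * retry via the 3-hCall path with the page info unconsumed
+        */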
+       kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+       if (!kpage) {
+               ehca_err(&shca->ib_device, "kpage alloc failed");
+               ret = -ENOMEM;
+               goto ehca_rereg_mr_rereg1_exit0;
+       }
+
+       pginfo_save = *pginfo;
+       ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage);
+       if (ret) {
+               ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
+                        "pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p",
+                        e_mr, pginfo, pginfo->type, pginfo->num_pages,
+                        pginfo->num_4k, kpage);
+               goto ehca_rereg_mr_rereg1_exit1;
+       }
+       rpage = virt_to_abs(kpage);
+       if (!rpage) {
+               ehca_err(&shca->ib_device, "kpage=%p", kpage);
+               ret = -EFAULT;
+               goto ehca_rereg_mr_rereg1_exit1;
+       }
+       h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
+                                     (u64)iova_start, size, hipz_acl,
+                                     e_pd->fw_pd, rpage, &hipzout);
+       if (h_ret != H_SUCCESS) {
+               /*
+                * reregistration unsuccessful; retry with the 3-hCall path.
+                * This is required, for example, on H_MR_CONDITION
+                * (MW bound or MR is shared)
+                */
+               ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
+                         "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
+               *pginfo = pginfo_save;
+               ret = -EAGAIN;
+       } else if ((u64*)hipzout.vaddr != iova_start) {
+               ehca_err(&shca->ib_device, "PHYP changed iova_start in "
+                        "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
+                        "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
+                        hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
+                        e_mr->ib.ib_mr.lkey, hipzout.lkey);
+               ret = -EFAULT;
+       } else {
+               /*
+                * successful reregistration
+                * note: start and start_out are identical for eServer HCAs
+                */
+               e_mr->num_pages = pginfo->num_pages;
+               e_mr->num_4k    = pginfo->num_4k;
+               e_mr->start     = iova_start;
+               e_mr->size      = size;
+               e_mr->acl       = acl;
+               *lkey = hipzout.lkey;
+               *rkey = hipzout.rkey;
+       }
+
+ehca_rereg_mr_rereg1_exit1:
+       kfree(kpage);
+ehca_rereg_mr_rereg1_exit0:
+       if (ret && (ret != -EAGAIN))
+               ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
+                        "pginfo=%p num_pages=%lx num_4k=%lx",
+                        ret, *lkey, *rkey, pginfo, pginfo->num_pages,
+                        pginfo->num_4k);
+       return ret;
+} /* end ehca_rereg_mr_rereg1() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_rereg_mr(struct ehca_shca *shca,
+                 struct ehca_mr *e_mr,
+                 u64 *iova_start,
+                 u64 size,
+                 int acl,
+                 struct ehca_pd *e_pd,
+                 struct ehca_mr_pginfo *pginfo,
+                 u32 *lkey,
+                 u32 *rkey)
+{
+       int ret = 0;
+       u64 h_ret;
+       int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
+       int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
+
+       /* first determine reregistration hCall(s) */
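+       /*
+        * Rereg1 (one hipz_h_reregister_pmr call) works only if old and new
+        * page counts each fit into one rpage block (<= 512 4K pages) and
+        * the MR does not grow; otherwise the 3-hCall path (free,
+        * re-allocate, re-register pages) must be used
+        */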
+       if ((pginfo->num_4k > 512) || (e_mr->num_4k > 512) ||
+           (pginfo->num_4k > e_mr->num_4k)) {
+               ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx "
+                        "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k);
+               rereg_1_hcall = 0;
+               rereg_3_hcall = 1;
+       }
+
+       if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */
+               rereg_1_hcall = 0;
+               rereg_3_hcall = 1;
+               e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
+               ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
+                        e_mr);
+       }
+
+       if (rereg_1_hcall) {
+               ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
+                                          acl, e_pd, pginfo, lkey, rkey);
+               if (ret) {
+                       if (ret == -EAGAIN)
+                               rereg_3_hcall = 1;
+                       else
+                               goto ehca_rereg_mr_exit0;
+               }
+       }
+
+       if (rereg_3_hcall) {
+               struct ehca_mr save_mr;
+
+               /* first deregister old MR */
+               h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
+               if (h_ret != H_SUCCESS) {
+                       ehca_err(&shca->ib_device, "hipz_free_mr failed, "
+                                "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx "
+                                "mr->lkey=%x",
+                                h_ret, e_mr, shca->ipz_hca_handle.handle,
+                                e_mr->ipz_mr_handle.handle,
+                                e_mr->ib.ib_mr.lkey);
+                       ret = ehca_mrmw_map_hrc_free_mr(h_ret);
+                       goto ehca_rereg_mr_exit0;
+               }
+               /* clean ehca_mr_t, without changing struct ib_mr and lock */
+               save_mr = *e_mr;
+               ehca_mr_deletenew(e_mr);
+
+               /* set some MR values */
+               e_mr->flags = save_mr.flags;
+               e_mr->fmr_page_size = save_mr.fmr_page_size;
+               e_mr->fmr_max_pages = save_mr.fmr_max_pages;
+               e_mr->fmr_max_maps = save_mr.fmr_max_maps;
+               e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
+
+               ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
+                                 e_pd, pginfo, lkey, rkey);
+               if (ret) {
+                       u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
+                       memcpy(&e_mr->flags, &(save_mr.flags),
+                              sizeof(struct ehca_mr) - offset);
+                       goto ehca_rereg_mr_exit0;
+               }
+       }
+
+ehca_rereg_mr_exit0:
+       if (ret)
+               ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
+                        "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
+                        "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
+                        "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
+                        acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey,
+                        rereg_1_hcall, rereg_3_hcall);
+       return ret;
+} /* end ehca_rereg_mr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_unmap_one_fmr(struct ehca_shca *shca,
+                      struct ehca_mr *e_fmr)
+{
+       int ret = 0;
+       u64 h_ret;
+       int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
+       int rereg_3_hcall = 0; /* 1: use 3 hipz calls for unmapping */
+       struct ehca_pd *e_pd =
+               container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
+       struct ehca_mr save_fmr;
+       u32 tmp_lkey, tmp_rkey;
+       struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
+       struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+
+       /* first check if reregistration hCall can be used for unmap */
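+       /*
+        * the single rereg hCall rebuilds the page list in one shot and can
+        * therefore only handle FMRs with at most 512 pages
+        */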
+       if (e_fmr->fmr_max_pages > 512) {
+               rereg_1_hcall = 0;
+               rereg_3_hcall = 1;
+       }
+
+       if (rereg_1_hcall) {
+               /*
+                * note: after using the rereg hCall with len=0, the rereg
+                * hCall must be used again to register pages
+                */
+               h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
+                                             0, 0, e_pd->fw_pd, 0, &hipzout);
+               if (h_ret != H_SUCCESS) {
+                       /*
+                        * should not happen: the length was checked above,
+                        * FMRs are not shared, and no MW can be bound to FMRs
+                        */
+                       ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
+                                "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
+                                "mr_hndl=%lx lkey=%x lkey_out=%x",
+                                h_ret, e_fmr, shca->ipz_hca_handle.handle,
+                                e_fmr->ipz_mr_handle.handle,
+                                e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
+                       rereg_3_hcall = 1;
+               } else {
+                       /* successful reregistration */
+                       e_fmr->start = NULL;
+                       e_fmr->size = 0;
+                       tmp_lkey = hipzout.lkey;
+                       tmp_rkey = hipzout.rkey;
+               }
+       }
+
+       if (rereg_3_hcall) {
+               /* first free old FMR */
+               h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
+               if (h_ret != H_SUCCESS) {
+                       ehca_err(&shca->ib_device, "hipz_free_mr failed, "
+                                "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
+                                "lkey=%x",
+                                h_ret, e_fmr, shca->ipz_hca_handle.handle,
+                                e_fmr->ipz_mr_handle.handle,
+                                e_fmr->ib.ib_fmr.lkey);
+                       ret = ehca_mrmw_map_hrc_free_mr(h_ret);
+                       goto ehca_unmap_one_fmr_exit0;
+               }
+               /* clean ehca_mr_t, without changing lock */
+               save_fmr = *e_fmr;
+               ehca_mr_deletenew(e_fmr);
+
+               /* set some MR values */
+               e_fmr->flags = save_fmr.flags;
+               e_fmr->fmr_page_size = save_fmr.fmr_page_size;
+               e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
+               e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
+               e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
+               e_fmr->acl = save_fmr.acl;
+
+               pginfo.type      = EHCA_MR_PGI_FMR;
+               pginfo.num_pages = 0;
+               pginfo.num_4k    = 0;
+               ret = ehca_reg_mr(shca, e_fmr, NULL,
+                                 (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
+                                 e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
+                                 &tmp_rkey);
+               if (ret) {
+                       u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
+                       memcpy(&e_fmr->flags, &(save_fmr.flags),
+                              sizeof(struct ehca_mr) - offset);
+                       goto ehca_unmap_one_fmr_exit0;
+               }
+       }
+
+ehca_unmap_one_fmr_exit0:
+       if (ret)
+               ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
+                        "fmr_max_pages=%x rereg_1_hcall=%x rereg_3_hcall=%x",
+                        ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages,
+                        rereg_1_hcall, rereg_3_hcall);
+       return ret;
+} /* end ehca_unmap_one_fmr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_reg_smr(struct ehca_shca *shca,
+                struct ehca_mr *e_origmr,
+                struct ehca_mr *e_newmr,
+                u64 *iova_start,
+                int acl,
+                struct ehca_pd *e_pd,
+                u32 *lkey, /*OUT*/
+                u32 *rkey) /*OUT*/
+{
+       int ret = 0;
+       u64 h_ret;
+       u32 hipz_acl;
+       struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+
+       ehca_mrmw_map_acl(acl, &hipz_acl);
+       ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
+
+       h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
+                                   (u64)iova_start, hipz_acl, e_pd->fw_pd,
+                                   &hipzout);
+       if (h_ret != H_SUCCESS) {
+               ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
+                        "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
+                        "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
+                        h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
+                        shca->ipz_hca_handle.handle,
+                        e_origmr->ipz_mr_handle.handle,
+                        e_origmr->ib.ib_mr.lkey);
+               ret = ehca_mrmw_map_hrc_reg_smr(h_ret);
+               goto ehca_reg_smr_exit0;
+       }
+       /* successful registration */
+       e_newmr->num_pages     = e_origmr->num_pages;
+       e_newmr->num_4k        = e_origmr->num_4k;
+       e_newmr->start         = iova_start;
+       e_newmr->size          = e_origmr->size;
+       e_newmr->acl           = acl;
+       e_newmr->ipz_mr_handle = hipzout.handle;
+       *lkey = hipzout.lkey;
+       *rkey = hipzout.rkey;
+       return 0;
+
+ehca_reg_smr_exit0:
+       if (ret)
+               ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p "
+                        "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
+                        ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
+       return ret;
+} /* end ehca_reg_smr() */
+
+/*----------------------------------------------------------------------*/
+
+/* register internal max-MR to internal SHCA */
+int ehca_reg_internal_maxmr(
+       struct ehca_shca *shca,
+       struct ehca_pd *e_pd,
+       struct ehca_mr **e_maxmr)  /*OUT*/
+{
+       int ret;
+       struct ehca_mr *e_mr;
+       u64 *iova_start;
+       u64 size_maxmr;
+       struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
+       struct ib_phys_buf ib_pbuf;
+       u32 num_pages_mr;
+       u32 num_pages_4k; /* number of 4K pages */
+
+       e_mr = ehca_mr_new();
+       if (!e_mr) {
+               ehca_err(&shca->ib_device, "out of memory");
+               ret = -ENOMEM;
+               goto ehca_reg_internal_maxmr_exit0;
+       }
+       e_mr->flags |= EHCA_MR_FLAG_MAXMR;
+
+       /* register internal max-MR on HCA */
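+       /*
+        * the max-MR spans the whole kernel linear mapping: size is
+        * high_memory - PAGE_OFFSET, and the IO virtual address starts at
+        * KERNELBASE, matching the test in ehca_mr_is_maxmr()
+        */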
+       size_maxmr = (u64)high_memory - PAGE_OFFSET;
+       iova_start = (u64*)KERNELBASE;
+       ib_pbuf.addr = 0;
+       ib_pbuf.size = size_maxmr;
+       num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size_maxmr +
+                        PAGE_SIZE - 1) / PAGE_SIZE);
+       num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size_maxmr +
+                        EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
+
+       pginfo.type           = EHCA_MR_PGI_PHYS;
+       pginfo.num_pages      = num_pages_mr;
+       pginfo.num_4k         = num_pages_4k;
+       pginfo.num_phys_buf   = 1;
+       pginfo.phys_buf_array = &ib_pbuf;
+
+       ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
+                         &pginfo, &e_mr->ib.ib_mr.lkey,
+                         &e_mr->ib.ib_mr.rkey);
+       if (ret) {
+               ehca_err(&shca->ib_device, "reg of internal max MR failed, "
+                        "e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x "
+                        "num_pages_4k=%x", e_mr, iova_start, size_maxmr,
+                        num_pages_mr, num_pages_4k);
+               goto ehca_reg_internal_maxmr_exit1;
+       }
+
+       /* successful registration of all pages */
+       e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
+       e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
+       e_mr->ib.ib_mr.uobject = NULL;
+       atomic_inc(&(e_pd->ib_pd.usecnt));
+       atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
+       *e_maxmr = e_mr;
+       return 0;
+
+ehca_reg_internal_maxmr_exit1:
+       ehca_mr_delete(e_mr);
+ehca_reg_internal_maxmr_exit0:
+       if (ret)
+               ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
+                        ret, shca, e_pd, e_maxmr);
+       return ret;
+} /* end ehca_reg_internal_maxmr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_reg_maxmr(struct ehca_shca *shca,
+                  struct ehca_mr *e_newmr,
+                  u64 *iova_start,
+                  int acl,
+                  struct ehca_pd *e_pd,
+                  u32 *lkey,
+                  u32 *rkey)
+{
+       u64 h_ret;
+       struct ehca_mr *e_origmr = shca->maxmr;
+       u32 hipz_acl;
+       struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+
+       ehca_mrmw_map_acl(acl, &hipz_acl);
+       ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
+
+       h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
+                                   (u64)iova_start, hipz_acl, e_pd->fw_pd,
+                                   &hipzout);
+       if (h_ret != H_SUCCESS) {
+               ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
+                        "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
+                        h_ret, e_origmr, shca->ipz_hca_handle.handle,
+                        e_origmr->ipz_mr_handle.handle,
+                        e_origmr->ib.ib_mr.lkey);
+               return ehca_mrmw_map_hrc_reg_smr(h_ret);
+       }
+       /* successful registration */
+       e_newmr->num_pages     = e_origmr->num_pages;
+       e_newmr->num_4k        = e_origmr->num_4k;
+       e_newmr->start         = iova_start;
+       e_newmr->size          = e_origmr->size;
+       e_newmr->acl           = acl;
+       e_newmr->ipz_mr_handle = hipzout.handle;
+       *lkey = hipzout.lkey;
+       *rkey = hipzout.rkey;
+       return 0;
+} /* end ehca_reg_maxmr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
+{
+       int ret;
+       struct ehca_mr *e_maxmr;
+       struct ib_pd *ib_pd;
+
+       if (!shca->maxmr) {
+               ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
+               ret = -EINVAL;
+               goto ehca_dereg_internal_maxmr_exit0;
+       }
+
+       e_maxmr = shca->maxmr;
+       ib_pd = e_maxmr->ib.ib_mr.pd;
+       shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */
+
+       ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
+       if (ret) {
+               ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
+                        "ret=%x e_maxmr=%p shca=%p lkey=%x",
+                        ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
+               shca->maxmr = e_maxmr;
+               goto ehca_dereg_internal_maxmr_exit0;
+       }
+
+       atomic_dec(&ib_pd->usecnt);
+
+ehca_dereg_internal_maxmr_exit0:
+       if (ret)
+               ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p",
+                        ret, shca, shca->maxmr);
+       return ret;
+} /* end ehca_dereg_internal_maxmr() */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * check the physical buffer array of MR verbs for validity and
+ * calculate the MR size
+ */
+int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
+                                 int num_phys_buf,
+                                 u64 *iova_start,
+                                 u64 *size)
+{
+       struct ib_phys_buf *pbuf = phys_buf_array;
+       u64 size_count = 0;
+       u32 i;
+
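+       /*
+        * layout rules: the first buffer may start at any offset matching
+        * iova_start, but must end on a page boundary if more buffers
+        * follow; all further buffers must start page-aligned; all but the
+        * last must be a whole multiple of PAGE_SIZE; no buffer may be
+        * empty
+        */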
+       if (num_phys_buf == 0) {
+               ehca_gen_err("bad phys buf array len, num_phys_buf=0");
+               return -EINVAL;
+       }
+       /* check first buffer */
+       if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
+               ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
+                            "pbuf->addr=%lx pbuf->size=%lx",
+                            iova_start, pbuf->addr, pbuf->size);
+               return -EINVAL;
+       }
+       if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
+           (num_phys_buf > 1)) {
+               ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
+                            "pbuf->size=%lx", pbuf->addr, pbuf->size);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < num_phys_buf; i++) {
+               if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
+                       ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
+                                    "pbuf->size=%lx",
+                                    i, pbuf->addr, pbuf->size);
+                       return -EINVAL;
+               }
+               if (((i > 0) && /* not 1st */
+                    (i < (num_phys_buf - 1)) &&        /* not last */
+                    (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
+                       ehca_gen_err("bad size, i=%x pbuf->size=%lx",
+                                    i, pbuf->size);
+                       return -EINVAL;
+               }
+               size_count += pbuf->size;
+               pbuf++;
+       }
+
+       *size = size_count;
+       return 0;
+} /* end ehca_mr_chk_buf_and_calc_size() */
+
+/*----------------------------------------------------------------------*/
+
+/* check the page list of the map FMR verb for validity */
+int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
+                            u64 *page_list,
+                            int list_len)
+{
+       u32 i;
+       u64 *page;
+
+       if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
+               ehca_gen_err("bad list_len, list_len=%x "
+                            "e_fmr->fmr_max_pages=%x fmr=%p",
+                            list_len, e_fmr->fmr_max_pages, e_fmr);
+               return -EINVAL;
+       }
+
+       /* each page must be aligned */
+       page = page_list;
+       for (i = 0; i < list_len; i++) {
+               if (*page % e_fmr->fmr_page_size) {
+                       ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
+                                    "fmr_page_size=%x", i, *page, page, e_fmr,
+                                    e_fmr->fmr_page_size);
+                       return -EINVAL;
+               }
+               page++;
+       }
+
+       return 0;
+} /* end ehca_fmr_check_page_list() */
+
+/*----------------------------------------------------------------------*/
+
+/* setup page buffer from page info */
+int ehca_set_pagebuf(struct ehca_mr *e_mr,
+                    struct ehca_mr_pginfo *pginfo,
+                    u32 number,
+                    u64 *kpage)
+{
+       int ret = 0;
+       struct ib_umem_chunk *prev_chunk;
+       struct ib_umem_chunk *chunk;
+       struct ib_phys_buf *pbuf;
+       u64 *fmrlist;
+       u64 num4k, pgaddr, offs4k;
+       u32 i = 0;
+       u32 j = 0;
+
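+       /*
+        * three page sources, selected by pginfo->type: a phys_buf_array
+        * (EHCA_MR_PGI_PHYS), a user-memory chunk list (EHCA_MR_PGI_USER),
+        * or an FMR page list (EHCA_MR_PGI_FMR); in each case up to
+        * 'number' 4K page addresses are emitted into kpage, with the
+        * cursor fields in pginfo (next_buf/next_chunk/next_listelem,
+        * next_4k) advanced so the next call continues where this one
+        * stopped
+        */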
+       if (pginfo->type == EHCA_MR_PGI_PHYS) {
+               /* loop over desired phys_buf_array entries */
+               while (i < number) {
+                       pbuf   = pginfo->phys_buf_array + pginfo->next_buf;
+                       num4k  = ((pbuf->addr % EHCA_PAGESIZE) + pbuf->size +
+                                 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
+                       offs4k = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
+                       while (pginfo->next_4k < offs4k + num4k) {
+                               /* sanity check */
+                               if ((pginfo->page_cnt >= pginfo->num_pages) ||
+                                   (pginfo->page_4k_cnt >= pginfo->num_4k)) {
+                                       ehca_gen_err("page_cnt >= num_pages, "
+                                                    "page_cnt=%lx "
+                                                    "num_pages=%lx "
+                                                    "page_4k_cnt=%lx "
+                                                    "num_4k=%lx i=%x",
+                                                    pginfo->page_cnt,
+                                                    pginfo->num_pages,
+                                                    pginfo->page_4k_cnt,
+                                                    pginfo->num_4k, i);
+                                       ret = -EFAULT;
+                                       goto ehca_set_pagebuf_exit0;
+                               }
+                               *kpage = phys_to_abs(
+                                       (pbuf->addr & EHCA_PAGEMASK)
+                                       + (pginfo->next_4k * EHCA_PAGESIZE));
+                               if (!(*kpage) && pbuf->addr) {
+                                       ehca_gen_err("pbuf->addr=%lx "
+                                                    "pbuf->size=%lx "
+                                                    "next_4k=%lx", pbuf->addr,
+                                                    pbuf->size,
+                                                    pginfo->next_4k);
+                                       ret = -EFAULT;
+                                       goto ehca_set_pagebuf_exit0;
+                               }
+                               (pginfo->page_4k_cnt)++;
+                               (pginfo->next_4k)++;
+                               if (pginfo->next_4k %
+                                   (PAGE_SIZE / EHCA_PAGESIZE) == 0)
+                                       (pginfo->page_cnt)++;
+                               kpage++;
+                               i++;
+                               if (i >= number)
+                                       break;
+                       }
+                       if (pginfo->next_4k >= offs4k + num4k) {
+                               (pginfo->next_buf)++;
+                               pginfo->next_4k = 0;
+                       }
+               }
+       } else if (pginfo->type == EHCA_MR_PGI_USER) {
+               /* loop over desired chunk entries */
+               chunk      = pginfo->next_chunk;
+               prev_chunk = pginfo->next_chunk;
+               list_for_each_entry_continue(chunk,
+                                            (&(pginfo->region->chunk_list)),
+                                            list) {
+                       for (i = pginfo->next_nmap; i < chunk->nmap; ) {
+                               pgaddr = ( page_to_pfn(chunk->page_list[i].page)
+                                          << PAGE_SHIFT );
+                               *kpage = phys_to_abs(pgaddr +
+                                                    (pginfo->next_4k *
+                                                     EHCA_PAGESIZE));
+                               if (!(*kpage)) {
+                                       ehca_gen_err("pgaddr=%lx "
+                                                    "chunk->page_list[i]=%lx "
+                                                    "i=%x next_4k=%lx mr=%p",
+                                                    pgaddr,
+                                                    (u64)sg_dma_address(
+                                                            &chunk->
+                                                            page_list[i]),
+                                                    i, pginfo->next_4k, e_mr);
+                                       ret = -EFAULT;
+                                       goto ehca_set_pagebuf_exit0;
+                               }
+                               (pginfo->page_4k_cnt)++;
+                               (pginfo->next_4k)++;
+                               kpage++;
+                               if (pginfo->next_4k %
+                                   (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
+                                       (pginfo->page_cnt)++;
+                                       (pginfo->next_nmap)++;
+                                       pginfo->next_4k = 0;
+                                       i++;
+                               }
+                               j++;
+                               if (j >= number)
+                                       break;
+                       }
+                       if ((pginfo->next_nmap >= chunk->nmap) &&
+                           (j >= number)) {
+                               pginfo->next_nmap = 0;
+                               prev_chunk = chunk;
+                               break;
+                       } else if (pginfo->next_nmap >= chunk->nmap) {
+                               pginfo->next_nmap = 0;
+                               prev_chunk = chunk;
+                       } else if (j >= number)
+                               break;
+                       else
+                               prev_chunk = chunk;
+               }
+               pginfo->next_chunk =
+                       list_prepare_entry(prev_chunk,
+                                          (&(pginfo->region->chunk_list)),
+                                          list);
+       } else if (pginfo->type == EHCA_MR_PGI_FMR) {
+               /* loop over desired page_list entries */
+               fmrlist = pginfo->page_list + pginfo->next_listelem;
+               for (i = 0; i < number; i++) {
+                       *kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
+                                            pginfo->next_4k * EHCA_PAGESIZE);
+                       if (!(*kpage)) {
+                               ehca_gen_err("*fmrlist=%lx fmrlist=%p "
+                                            "next_listelem=%lx next_4k=%lx",
+                                            *fmrlist, fmrlist,
+                                            pginfo->next_listelem,
+                                            pginfo->next_4k);
+                               ret = -EFAULT;
+                               goto ehca_set_pagebuf_exit0;
+                       }
+                       (pginfo->page_4k_cnt)++;
+                       (pginfo->next_4k)++;
+                       kpage++;
+                       if (pginfo->next_4k %
+                           (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
+                               (pginfo->page_cnt)++;
+                               (pginfo->next_listelem)++;
+                               fmrlist++;
+                               pginfo->next_4k = 0;
+                       }
+               }
+       } else {
+               ehca_gen_err("bad pginfo->type=%x", pginfo->type);
+               ret = -EFAULT;
+               goto ehca_set_pagebuf_exit0;
+       }
+
+ehca_set_pagebuf_exit0:
+       if (ret)
+               ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
+                            "num_4k=%lx next_buf=%lx next_4k=%lx number=%x "
+                            "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
+                            "next_listelem=%lx region=%p next_chunk=%p "
+                            "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
+                            pginfo->num_pages, pginfo->num_4k,
+                            pginfo->next_buf, pginfo->next_4k, number, kpage,
+                            pginfo->page_cnt, pginfo->page_4k_cnt, i,
+                            pginfo->next_listelem, pginfo->region,
+                            pginfo->next_chunk, pginfo->next_nmap);
+       return ret;
+} /* end ehca_set_pagebuf() */
+
+/*----------------------------------------------------------------------*/
+
+/* set up a single page from the page info page buffer */
+int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
+                      struct ehca_mr_pginfo *pginfo,
+                      u64 *rpage)
+{
+       int ret = 0;
+       struct ib_phys_buf *tmp_pbuf;
+       u64 *fmrlist;
+       struct ib_umem_chunk *chunk;
+       struct ib_umem_chunk *prev_chunk;
+       u64 pgaddr, num4k, offs4k;
+
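+       /*
+        * handles the same three page sources as ehca_set_pagebuf(), but
+        * emits exactly one 4K page address and advances the pginfo
+        * cursors by one step
+        */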
+       if (pginfo->type == EHCA_MR_PGI_PHYS) {
+               /* sanity check */
+               if ((pginfo->page_cnt >= pginfo->num_pages) ||
+                   (pginfo->page_4k_cnt >= pginfo->num_4k)) {
+                       ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx "
+                                    "num_pages=%lx page_4k_cnt=%lx num_4k=%lx",
+                                    pginfo->page_cnt, pginfo->num_pages,
+                                    pginfo->page_4k_cnt, pginfo->num_4k);
+                       ret = -EFAULT;
+                       goto ehca_set_pagebuf_1_exit0;
+               }
+               tmp_pbuf = pginfo->phys_buf_array + pginfo->next_buf;
+               num4k  = ((tmp_pbuf->addr % EHCA_PAGESIZE) + tmp_pbuf->size +
+                         EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
+               offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
+               *rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) +
+                                    (pginfo->next_4k * EHCA_PAGESIZE));
+               if (!(*rpage) && tmp_pbuf->addr) {
+                       ehca_gen_err("tmp_pbuf->addr=%lx"
+                                    " tmp_pbuf->size=%lx next_4k=%lx",
+                                    tmp_pbuf->addr, tmp_pbuf->size,
+                                    pginfo->next_4k);
+                       ret = -EFAULT;
+                       goto ehca_set_pagebuf_1_exit0;
+               }
+               (pginfo->page_4k_cnt)++;
+               (pginfo->next_4k)++;
+               if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0)
+                       (pginfo->page_cnt)++;
+               if (pginfo->next_4k >= offs4k + num4k) {
+                       (pginfo->next_buf)++;
+                       pginfo->next_4k = 0;
+               }
+       } else if (pginfo->type == EHCA_MR_PGI_USER) {
+               chunk      = pginfo->next_chunk;
+               prev_chunk = pginfo->next_chunk;
+               list_for_each_entry_continue(chunk,
+                                            (&(pginfo->region->chunk_list)),
+                                            list) {
+                       pgaddr = ( page_to_pfn(chunk->page_list[
+                                                      pginfo->next_nmap].page)
+                                  << PAGE_SHIFT);
+                       *rpage = phys_to_abs(pgaddr +
+                                            (pginfo->next_4k * EHCA_PAGESIZE));
+                       if (!(*rpage)) {
+                               ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx"
+                                            " next_nmap=%lx next_4k=%lx mr=%p",
+                                            pgaddr, (u64)sg_dma_address(
+                                                    &chunk->page_list[
+                                                            pginfo->
+                                                            next_nmap]),
+                                            pginfo->next_nmap, pginfo->next_4k,
+                                            e_mr);
+                               ret = -EFAULT;
+                               goto ehca_set_pagebuf_1_exit0;
+                       }
+                       (pginfo->page_4k_cnt)++;
+                       (pginfo->next_4k)++;
+                       if (pginfo->next_4k %
+                           (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
+                               (pginfo->page_cnt)++;
+                               (pginfo->next_nmap)++;
+                               pginfo->next_4k = 0;
+                       }
+                       if (pginfo->next_nmap >= chunk->nmap) {
+                               pginfo->next_nmap = 0;
+                               prev_chunk = chunk;
+                       }
+                       break;
+               }
+               pginfo->next_chunk =
+                       list_prepare_entry(prev_chunk,
+                                          (&(pginfo->region->chunk_list)),
+                                          list);
+       } else if (pginfo->type == EHCA_MR_PGI_FMR) {
+               fmrlist = pginfo->page_list + pginfo->next_listelem;
+               *rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
+                                    pginfo->next_4k * EHCA_PAGESIZE);
+               if (!(*rpage)) {
+                       ehca_gen_err("*fmrlist=%lx fmrlist=%p "
+                                    "next_listelem=%lx next_4k=%lx",
+                                    *fmrlist, fmrlist, pginfo->next_listelem,
+                                    pginfo->next_4k);
+                       ret = -EFAULT;
+                       goto ehca_set_pagebuf_1_exit0;
+               }
+               (pginfo->page_4k_cnt)++;
+               (pginfo->next_4k)++;
+               if (pginfo->next_4k %
+                   (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
+                       (pginfo->page_cnt)++;
+                       (pginfo->next_listelem)++;
+                       pginfo->next_4k = 0;
+               }
+       } else {
+               ehca_gen_err("bad pginfo->type=%x", pginfo->type);
+               ret = -EFAULT;
+               goto ehca_set_pagebuf_1_exit0;
+       }
+
+ehca_set_pagebuf_1_exit0:
+       if (ret)
+               ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
+                            "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p "
+                            "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx "
+                            "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
+                            pginfo, pginfo->type, pginfo->num_pages,
+                            pginfo->num_4k, pginfo->next_buf, pginfo->next_4k,
+                            rpage, pginfo->page_cnt, pginfo->page_4k_cnt,
+                            pginfo->next_listelem, pginfo->region,
+                            pginfo->next_chunk, pginfo->next_nmap);
+       return ret;
+} /* end ehca_set_pagebuf_1() */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * check whether an MR is a max-MR, i.e. one that spans all of kernel memory;
+ * returns 1 if it is a max-MR, else 0
+ */
+int ehca_mr_is_maxmr(u64 size,
+                    u64 *iova_start)
+{
+       /* an MR is treated as a max-MR only if it matches the following: */
+       if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
+           (iova_start == (void *)KERNELBASE)) {
+               ehca_gen_dbg("this is a max-MR");
+               return 1;
+       } else
+               return 0;
+} /* end ehca_mr_is_maxmr() */
+
+/*----------------------------------------------------------------------*/
+
+/* map IB access control bits to hipz access control; used for MR and MW */
+void ehca_mrmw_map_acl(int ib_acl,
+                      u32 *hipz_acl)
+{
+       *hipz_acl = 0;
+       if (ib_acl & IB_ACCESS_REMOTE_READ)
+               *hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
+       if (ib_acl & IB_ACCESS_REMOTE_WRITE)
+               *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
+       if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
+               *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
+       if (ib_acl & IB_ACCESS_LOCAL_WRITE)
+               *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
+       if (ib_acl & IB_ACCESS_MW_BIND)
+               *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
+} /* end ehca_mrmw_map_acl() */
+
+/*----------------------------------------------------------------------*/
+
+/* set the page size in hipz access control for MR/MW */
+void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/
+{
+       return; /* HCA supports only 4k */
+} /* end ehca_mrmw_set_pgsize_hipz_acl() */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * reverse-map hipz access control bits back to IB access control;
+ * used for both MR and MW
+ */
+void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
+                              int *ib_acl) /*OUT*/
+{
+       *ib_acl = 0;
+       if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
+               *ib_acl |= IB_ACCESS_REMOTE_READ;
+       if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
+               *ib_acl |= IB_ACCESS_REMOTE_WRITE;
+       if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
+               *ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
+       if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
+               *ib_acl |= IB_ACCESS_LOCAL_WRITE;
+       if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
+               *ib_acl |= IB_ACCESS_MW_BIND;
+} /* end ehca_mrmw_reverse_map_acl() */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * map HIPZ rc to IB retcodes for MR/MW allocations
+ * Used for hipz_mr_reg_alloc and hipz_mw_alloc.
+ */
+int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc)
+{
+       switch (hipz_rc) {
+       case H_SUCCESS:              /* successful completion */
+               return 0;
+       case H_ADAPTER_PARM:         /* invalid adapter handle */
+       case H_RT_PARM:              /* invalid resource type */
+       case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
+       case H_MLENGTH_PARM:         /* invalid memory length */
+       case H_MEM_ACCESS_PARM:      /* invalid access controls */
+       case H_CONSTRAINED:          /* resource constraint */
+               return -EINVAL;
+       case H_BUSY:                 /* long busy */
+               return -EBUSY;
+       default:
+               return -EINVAL;
+       }
+} /* end ehca_mrmw_map_hrc_alloc() */
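+
+/*
+ * The ehca_mrmw_map_hrc_*() helpers below all follow the same pattern:
+ * the expected success code(s) map to 0, H_BUSY maps to -EBUSY, and any
+ * other code, known or unknown, conservatively maps to -EINVAL.
+ */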
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * map HIPZ rc to IB retcodes for MR register rpage
+ * Used for hipz_h_register_rpage_mr at registering last page
+ */
+int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc)
+{
+       switch (hipz_rc) {
+       case H_SUCCESS:         /* registration complete */
+               return 0;
+       case H_PAGE_REGISTERED: /* page registered */
+       case H_ADAPTER_PARM:    /* invalid adapter handle */
+       case H_RH_PARM:         /* invalid resource handle */
+/*     case H_QT_PARM:            invalid queue type */
+       case H_PARAMETER:       /*
+                                * invalid logical address,
+                                * or count zero or greater 512
+                                */
+       case H_TABLE_FULL:      /* page table full */
+       case H_HARDWARE:        /* HCA not operational */
+               return -EINVAL;
+       case H_BUSY:            /* long busy */
+               return -EBUSY;
+       default:
+               return -EINVAL;
+       }
+} /* end ehca_mrmw_map_hrc_rrpg_last() */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * map HIPZ rc to IB retcodes for MR register rpage
+ * Used for hipz_h_register_rpage_mr at registering one page, but not last page
+ */
+int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc)
+{
+       switch (hipz_rc) {
+       case H_PAGE_REGISTERED: /* page registered */
+               return 0;
+       case H_SUCCESS:         /* registration complete */
+       case H_ADAPTER_PARM:    /* invalid adapter handle */
+       case H_RH_PARM:         /* invalid resource handle */
+/*     case H_QT_PARM:            invalid queue type */
+       case H_PARAMETER:       /*
+                                * invalid logical address,
+                                * or count zero or greater 512
+                                */
+       case H_TABLE_FULL:      /* page table full */
+       case H_HARDWARE:        /* HCA not operational */
+               return -EINVAL;
+       case H_BUSY:            /* long busy */
+               return -EBUSY;
+       default:
+               return -EINVAL;
+       }
+} /* end ehca_mrmw_map_hrc_rrpg_notlast() */
+
+/*----------------------------------------------------------------------*/
+
+/* map HIPZ rc to IB retcodes for MR query. Used for hipz_mr_query. */
+int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc)
+{
+       switch (hipz_rc) {
+       case H_SUCCESS:              /* successful completion */
+               return 0;
+       case H_ADAPTER_PARM:         /* invalid adapter handle */
+       case H_RH_PARM:              /* invalid resource handle */
+               return -EINVAL;
+       case H_BUSY:                 /* long busy */
+               return -EBUSY;
+       default:
+               return -EINVAL;
+       }
+} /* end ehca_mrmw_map_hrc_query_mr() */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * map HIPZ rc to IB retcodes for freeing MR resource
+ * Used for hipz_h_free_resource_mr
+ */
+int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc)
+{
+       switch (hipz_rc) {
+       case H_SUCCESS:      /* resource freed */
+               return 0;
+       case H_ADAPTER_PARM: /* invalid adapter handle */
+       case H_RH_PARM:      /* invalid resource handle */
+       case H_R_STATE:      /* invalid resource state */
+       case H_HARDWARE:     /* HCA not operational */
+               return -EINVAL;
+       case H_RESOURCE:     /* Resource in use */
+       case H_BUSY:         /* long busy */
+               return -EBUSY;
+       default:
+               return -EINVAL;
+       }
+} /* end ehca_mrmw_map_hrc_free_mr() */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * map HIPZ rc to IB retcodes for freeing MW resource
+ * Used for hipz_h_free_resource_mw
+ */
+int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc)
+{
+       switch (hipz_rc) {
+       case H_SUCCESS:      /* resource freed */
+               return 0;
+       case H_ADAPTER_PARM: /* invalid adapter handle */
+       case H_RH_PARM:      /* invalid resource handle */
+       case H_R_STATE:      /* invalid resource state */
+       case H_HARDWARE:     /* HCA not operational */
+               return -EINVAL;
+       case H_RESOURCE:     /* Resource in use */
+       case H_BUSY:         /* long busy */
+               return -EBUSY;
+       default:
+               return -EINVAL;
+       }
+} /* end ehca_mrmw_map_hrc_free_mw() */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * map HIPZ rc to IB retcodes for SMR registrations
+ * Used for hipz_h_register_smr.
+ */
+int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc)
+{
+       switch (hipz_rc) {
+       case H_SUCCESS:              /* successful completion */
+               return 0;
+       case H_ADAPTER_PARM:         /* invalid adapter handle */
+       case H_RH_PARM:              /* invalid resource handle */
+       case H_MEM_PARM:             /* invalid MR virtual address */
+       case H_MEM_ACCESS_PARM:      /* invalid access controls */
+       case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
+               return -EINVAL;
+       case H_BUSY:                 /* long busy */
+               return -EBUSY;
+       default:
+               return -EINVAL;
+       }
+} /* end ehca_mrmw_map_hrc_reg_smr() */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * MR destructor and constructor
+ * used in the reregister MR verb; resets all fields of struct ehca_mr
+ * to 0, except the embedded struct ib_mr and the spinlock
+ */
+void ehca_mr_deletenew(struct ehca_mr *mr)
+{
+       mr->flags         = 0;
+       mr->num_pages     = 0;
+       mr->num_4k        = 0;
+       mr->acl           = 0;
+       mr->start         = NULL;
+       mr->fmr_page_size = 0;
+       mr->fmr_max_pages = 0;
+       mr->fmr_max_maps  = 0;
+       mr->fmr_map_cnt   = 0;
+       memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
+       memset(&mr->galpas, 0, sizeof(mr->galpas));
+       mr->nr_of_pages   = 0;
+       mr->pagearray     = NULL;
+} /* end ehca_mr_deletenew() */
+
+int ehca_init_mrmw_cache(void)
+{
+       mr_cache = kmem_cache_create("ehca_cache_mr",
+                                    sizeof(struct ehca_mr), 0,
+                                    SLAB_HWCACHE_ALIGN,
+                                    NULL, NULL);
+       if (!mr_cache)
+               return -ENOMEM;
+       mw_cache = kmem_cache_create("ehca_cache_mw",
+                                    sizeof(struct ehca_mw), 0,
+                                    SLAB_HWCACHE_ALIGN,
+                                    NULL, NULL);
+       if (!mw_cache) {
+               kmem_cache_destroy(mr_cache);
+               mr_cache = NULL;
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+void ehca_cleanup_mrmw_cache(void)
+{
+       if (mr_cache)
+               kmem_cache_destroy(mr_cache);
+       if (mw_cache)
+               kmem_cache_destroy(mw_cache);
+}
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.h b/drivers/infiniband/hw/ehca/ehca_mrmw.h
new file mode 100644 (file)
index 0000000..d936e40
--- /dev/null
@@ -0,0 +1,140 @@
+/*
+ *  IBM eServer eHCA InfiniBand device driver for Linux on POWER
+ *
+ *  MR/MW declarations and inline functions
+ *
+ *  Authors: Dietmar Decker <ddecker@de.ibm.com>
+ *           Christoph Raisch <raisch@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _EHCA_MRMW_H_
+#define _EHCA_MRMW_H_
+
+int ehca_reg_mr(struct ehca_shca *shca,
+               struct ehca_mr *e_mr,
+               u64 *iova_start,
+               u64 size,
+               int acl,
+               struct ehca_pd *e_pd,
+               struct ehca_mr_pginfo *pginfo,
+               u32 *lkey,
+               u32 *rkey);
+
+int ehca_reg_mr_rpages(struct ehca_shca *shca,
+                      struct ehca_mr *e_mr,
+                      struct ehca_mr_pginfo *pginfo);
+
+int ehca_rereg_mr(struct ehca_shca *shca,
+                 struct ehca_mr *e_mr,
+                 u64 *iova_start,
+                 u64 size,
+                 int mr_access_flags,
+                 struct ehca_pd *e_pd,
+                 struct ehca_mr_pginfo *pginfo,
+                 u32 *lkey,
+                 u32 *rkey);
+
+int ehca_unmap_one_fmr(struct ehca_shca *shca,
+                      struct ehca_mr *e_fmr);
+
+int ehca_reg_smr(struct ehca_shca *shca,
+                struct ehca_mr *e_origmr,
+                struct ehca_mr *e_newmr,
+                u64 *iova_start,
+                int acl,
+                struct ehca_pd *e_pd,
+                u32 *lkey,
+                u32 *rkey);
+
+int ehca_reg_internal_maxmr(struct ehca_shca *shca,
+                           struct ehca_pd *e_pd,
+                           struct ehca_mr **maxmr);
+
+int ehca_reg_maxmr(struct ehca_shca *shca,
+                  struct ehca_mr *e_newmr,
+                  u64 *iova_start,
+                  int acl,
+                  struct ehca_pd *e_pd,
+                  u32 *lkey,
+                  u32 *rkey);
+
+int ehca_dereg_internal_maxmr(struct ehca_shca *shca);
+
+int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
+                                 int num_phys_buf,
+                                 u64 *iova_start,
+                                 u64 *size);
+
+int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
+                            u64 *page_list,
+                            int list_len);
+
+int ehca_set_pagebuf(struct ehca_mr *e_mr,
+                    struct ehca_mr_pginfo *pginfo,
+                    u32 number,
+                    u64 *kpage);
+
+int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
+                      struct ehca_mr_pginfo *pginfo,
+                      u64 *rpage);
+
+int ehca_mr_is_maxmr(u64 size,
+                    u64 *iova_start);
+
+void ehca_mrmw_map_acl(int ib_acl,
+                      u32 *hipz_acl);
+
+void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl);
+
+void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
+                              int *ib_acl);
+
+int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc);
+
+int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc);
+
+int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc);
+
+int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc);
+
+int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc);
+
+int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc);
+
+int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc);
+
+void ehca_mr_deletenew(struct ehca_mr *mr);
+
+#endif  /*_EHCA_MRMW_H_*/
diff --git a/drivers/infiniband/hw/ehca/ehca_pd.c b/drivers/infiniband/hw/ehca/ehca_pd.c
new file mode 100644 (file)
index 0000000..2c3cdc6
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ *  IBM eServer eHCA InfiniBand device driver for Linux on POWER
+ *
+ *  PD functions
+ *
+ *  Authors: Christoph Raisch <raisch@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/current.h>
+
+#include "ehca_tools.h"
+#include "ehca_iverbs.h"
+
+static struct kmem_cache *pd_cache;
+
+struct ib_pd *ehca_alloc_pd(struct ib_device *device,
+                           struct ib_ucontext *context, struct ib_udata *udata)
+{
+       struct ehca_pd *pd;
+
+       pd = kmem_cache_alloc(pd_cache, SLAB_KERNEL);
+       if (!pd) {
+               ehca_err(device, "device=%p context=%p out of memory",
+                        device, context);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       memset(pd, 0, sizeof(struct ehca_pd));
+       pd->ownpid = current->tgid;
+
+       /*
+        * Kernel PD: when context == NULL
+        * User   PD: when context != NULL
+        */
+       if (!context) {
+               /*
+                * Kernel PDs after init always reuse the one
+                * created in ehca_shca_reopen()
+                */
+               struct ehca_shca *shca = container_of(device, struct ehca_shca,
+                                                     ib_device);
+               pd->fw_pd.value = shca->pd->fw_pd.value;
+       } else
+               pd->fw_pd.value = (u64)pd;
+
+       return &pd->ib_pd;
+}
+
+int ehca_dealloc_pd(struct ib_pd *pd)
+{
+       u32 cur_pid = current->tgid;
+       struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
+
+       if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+           my_pd->ownpid != cur_pid) {
+               ehca_err(pd->device, "Invalid caller pid=%x ownpid=%x",
+                        cur_pid, my_pd->ownpid);
+               return -EINVAL;
+       }
+
+       kmem_cache_free(pd_cache,
+                       container_of(pd, struct ehca_pd, ib_pd));
+
+       return 0;
+}
+
+int ehca_init_pd_cache(void)
+{
+       pd_cache = kmem_cache_create("ehca_cache_pd",
+                                    sizeof(struct ehca_pd), 0,
+                                    SLAB_HWCACHE_ALIGN,
+                                    NULL, NULL);
+       if (!pd_cache)
+               return -ENOMEM;
+       return 0;
+}
+
+void ehca_cleanup_pd_cache(void)
+{
+       if (pd_cache)
+               kmem_cache_destroy(pd_cache);
+}
diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/infiniband/hw/ehca/ehca_qes.h
new file mode 100644 (file)
index 0000000..8707d29
--- /dev/null
@@ -0,0 +1,259 @@
+/*
+ *  IBM eServer eHCA InfiniBand device driver for Linux on POWER
+ *
+ *  Hardware request structures
+ *
+ *  Authors: Waleri Fomin <fomin@de.ibm.com>
+ *           Reinhard Ernst <rernst@de.ibm.com>
+ *           Christoph Raisch <raisch@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef _EHCA_QES_H_
+#define _EHCA_QES_H_
+
+#include "ehca_tools.h"
+
+/* virtual scatter-gather entry to specify remote addresses with length */
+struct ehca_vsgentry {
+       u64 vaddr;
+       u32 lkey;
+       u32 length;
+};
+
+#define GRH_FLAG_MASK        EHCA_BMASK_IBM(7,7)
+#define GRH_IPVERSION_MASK   EHCA_BMASK_IBM(0,3)
+#define GRH_TCLASS_MASK      EHCA_BMASK_IBM(4,12)
+#define GRH_FLOWLABEL_MASK   EHCA_BMASK_IBM(13,31)
+#define GRH_PAYLEN_MASK      EHCA_BMASK_IBM(32,47)
+#define GRH_NEXTHEADER_MASK  EHCA_BMASK_IBM(48,55)
+#define GRH_HOPLIMIT_MASK    EHCA_BMASK_IBM(56,63)
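+
+/*
+ * Note: EHCA_BMASK_IBM(from, to) is assumed here to use IBM bit
+ * numbering (bit 0 = most significant bit), so e.g. GRH_FLOWLABEL_MASK
+ * selects bits 13..31 of the first GRH word.
+ */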
+
+/*
+ * Unreliable Datagram Address Vector Format
+ * see IBTA Vol1 chapter 8.3 Global Routing Header
+ */
+struct ehca_ud_av {
+       u8 sl;
+       u8 lnh;
+       u16 dlid;
+       u8 reserved1;
+       u8 reserved2;
+       u8 reserved3;
+       u8 slid_path_bits;
+       u8 reserved4;
+       u8 ipd;
+       u8 reserved5;
+       u8 pmtu;
+       u32 reserved6;
+       u64 reserved7;
+       union {
+               struct {
+                       u64 word_0; /* always set to 6 */
+                       /* should be 0x1B for IB transport */
+                       u64 word_1;
+                       u64 word_2;
+                       u64 word_3;
+                       u64 word_4;
+               } grh;
+               struct {
+                       u32 wd_0;
+                       u32 wd_1;
+                       /* DWord_1 --> SGID */
+
+                       u32 sgid_wd3;
+                       u32 sgid_wd2;
+
+                       u32 sgid_wd1;
+                       u32 sgid_wd0;
+                       /* DWord_3 --> DGID */
+
+                       u32 dgid_wd3;
+                       u32 dgid_wd2;
+
+                       u32 dgid_wd1;
+                       u32 dgid_wd0;
+               } grh_l;
+       };
+};
+
+/* maximum number of sg entries allowed in a WQE */
+#define MAX_WQE_SG_ENTRIES 252
+
+#define WQE_OPTYPE_SEND             0x80
+#define WQE_OPTYPE_RDMAREAD         0x40
+#define WQE_OPTYPE_RDMAWRITE        0x20
+#define WQE_OPTYPE_CMPSWAP          0x10
+#define WQE_OPTYPE_FETCHADD         0x08
+#define WQE_OPTYPE_BIND             0x04
+
+#define WQE_WRFLAG_REQ_SIGNAL_COM   0x80
+#define WQE_WRFLAG_FENCE            0x40
+#define WQE_WRFLAG_IMM_DATA_PRESENT 0x20
+#define WQE_WRFLAG_SOLIC_EVENT      0x10
+
+#define WQEF_CACHE_HINT             0x80
+#define WQEF_CACHE_HINT_RD_WR       0x40
+#define WQEF_TIMED_WQE              0x20
+#define WQEF_PURGE                  0x08
+#define WQEF_HIGH_NIBBLE            0xF0
+
+#define MW_BIND_ACCESSCTRL_R_WRITE   0x40
+#define MW_BIND_ACCESSCTRL_R_READ    0x20
+#define MW_BIND_ACCESSCTRL_R_ATOMIC  0x10
+
+struct ehca_wqe {
+       u64 work_request_id;
+       u8 optype;
+       u8 wr_flag;
+       u16 pkeyi;
+       u8 wqef;
+       u8 nr_of_data_seg;
+       u16 wqe_provided_slid;
+       u32 destination_qp_number;
+       u32 resync_psn_sqp;
+       u32 local_ee_context_qkey;
+       u32 immediate_data;
+       union {
+               struct {
+                       u64 remote_virtual_adress;
+                       u32 rkey;
+                       u32 reserved;
+                       u64 atomic_1st_op_dma_len;
+                       u64 atomic_2nd_op;
+                       struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
+
+               } nud;
+               struct {
+                       u64 ehca_ud_av_ptr;
+                       u64 reserved1;
+                       u64 reserved2;
+                       u64 reserved3;
+                       struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
+               } ud_avp;
+               struct {
+                       struct ehca_ud_av ud_av;
+                       struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES -
+                                                    2];
+               } ud_av;
+               struct {
+                       u64 reserved0;
+                       u64 reserved1;
+                       u64 reserved2;
+                       u64 reserved3;
+                       struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
+               } all_rcv;
+
+               struct {
+                       u64 reserved;
+                       u32 rkey;
+                       u32 old_rkey;
+                       u64 reserved1;
+                       u64 reserved2;
+                       u64 virtual_address;
+                       u32 reserved3;
+                       u32 length;
+                       u32 reserved4;
+                       u16 reserved5;
+                       u8 reserved6;
+                       u8 lr_ctl;
+                       u32 lkey;
+                       u32 reserved7;
+                       u64 reserved8;
+                       u64 reserved9;
+                       u64 reserved10;
+                       u64 reserved11;
+               } bind;
+               struct {
+                       u64 reserved12;
+                       u64 reserved13;
+                       u32 size;
+                       u32 start;
+               } inline_data;
+       } u;
+
+};
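+
+/*
+ * Which arm of the union above is valid is presumably implied by optype
+ * and the QP's service type (nud for RC/UC, ud_av/ud_avp for UD sends,
+ * all_rcv for receive WQEs); the hardware layout itself does not tag it.
+ */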
+
+#define WC_SEND_RECEIVE EHCA_BMASK_IBM(0,0)
+#define WC_IMM_DATA     EHCA_BMASK_IBM(1,1)
+#define WC_GRH_PRESENT  EHCA_BMASK_IBM(2,2)
+#define WC_SE_BIT       EHCA_BMASK_IBM(3,3)
+#define WC_STATUS_ERROR_BIT 0x80000000
+#define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800
+#define WC_STATUS_PURGE_BIT 0x10
+
+struct ehca_cqe {
+       u64 work_request_id;
+       u8 optype;
+       u8 w_completion_flags;
+       u16 reserved1;
+       u32 nr_bytes_transferred;
+       u32 immediate_data;
+       u32 local_qp_number;
+       u8 freed_resource_count;
+       u8 service_level;
+       u16 wqe_count;
+       u32 qp_token;
+       u32 qkey_ee_token;
+       u32 remote_qp_number;
+       u16 dlid;
+       u16 rlid;
+       u16 reserved2;
+       u16 pkey_index;
+       u32 cqe_timestamp;
+       u32 wqe_timestamp;
+       u8 wqe_timestamp_valid;
+       u8 reserved3;
+       u8 reserved4;
+       u8 cqe_flags;
+       u32 status;
+};
+
+struct ehca_eqe {
+       u64 entry;
+};
+
+struct ehca_mrte {
+       u64 starting_va;
+       u64 length; /* length of memory region in bytes */
+       u32 pd;
+       u8 key_instance;
+       u8 pagesize;
+       u8 mr_control;
+       u8 local_remote_access_ctrl;
+       u8 reserved[0x20 - 0x18];
+       u64 at_pointer[4];
+};
+#endif /*_EHCA_QES_H_*/
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
new file mode 100644 (file)
index 0000000..4b27bed
--- /dev/null
@@ -0,0 +1,1506 @@
+/*
+ *  IBM eServer eHCA InfiniBand device driver for Linux on POWER
+ *
+ *  QP functions
+ *
+ *  Authors: Waleri Fomin <fomin@de.ibm.com>
+ *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *           Reinhard Ernst <rernst@de.ibm.com>
+ *           Heiko J Schick <schickhj@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <asm/current.h>
+
+#include "ehca_classes.h"
+#include "ehca_tools.h"
+#include "ehca_qes.h"
+#include "ehca_iverbs.h"
+#include "hcp_if.h"
+#include "hipz_fns.h"
+
+static struct kmem_cache *qp_cache;
+
+/*
+ * attributes not supported by query qp
+ */
+#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_MAX_DEST_RD_ATOMIC | \
+                                    IB_QP_MAX_QP_RD_ATOMIC   | \
+                                    IB_QP_ACCESS_FLAGS       | \
+                                    IB_QP_EN_SQD_ASYNC_NOTIFY)
+
+/*
+ * ehca (internal) qp state values
+ */
+enum ehca_qp_state {
+       EHCA_QPS_RESET = 1,
+       EHCA_QPS_INIT = 2,
+       EHCA_QPS_RTR = 3,
+       EHCA_QPS_RTS = 5,
+       EHCA_QPS_SQD = 6,
+       EHCA_QPS_SQE = 8,
+       EHCA_QPS_ERR = 128
+};
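+
+/*
+ * Note: the non-contiguous values above (no 4 or 7, ERR = 128) presumably
+ * mirror the firmware's state encoding and must not be renumbered.
+ */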
+
+/*
+ * qp state transitions as defined by IB Arch Rel 1.1 page 431
+ */
+enum ib_qp_statetrans {
+       IB_QPST_ANY2RESET,
+       IB_QPST_ANY2ERR,
+       IB_QPST_RESET2INIT,
+       IB_QPST_INIT2RTR,
+       IB_QPST_INIT2INIT,
+       IB_QPST_RTR2RTS,
+       IB_QPST_RTS2SQD,
+       IB_QPST_RTS2RTS,
+       IB_QPST_SQD2RTS,
+       IB_QPST_SQE2RTS,
+       IB_QPST_SQD2SQD,
+       IB_QPST_MAX     /* nr of transitions, this must be last!!! */
+};
+
+/*
+ * ib2ehca_qp_state maps IB to ehca qp_state
+ * returns ehca qp state corresponding to given ib qp state
+ */
+static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state)
+{
+       switch (ib_qp_state) {
+       case IB_QPS_RESET:
+               return EHCA_QPS_RESET;
+       case IB_QPS_INIT:
+               return EHCA_QPS_INIT;
+       case IB_QPS_RTR:
+               return EHCA_QPS_RTR;
+       case IB_QPS_RTS:
+               return EHCA_QPS_RTS;
+       case IB_QPS_SQD:
+               return EHCA_QPS_SQD;
+       case IB_QPS_SQE:
+               return EHCA_QPS_SQE;
+       case IB_QPS_ERR:
+               return EHCA_QPS_ERR;
+       default:
+               ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
+               return -EINVAL;
+       }
+}
+
+/*
+ * ehca2ib_qp_state maps ehca to IB qp_state
+ * returns ib qp state corresponding to given ehca qp state
+ */
+static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state
+                                               ehca_qp_state)
+{
+       switch (ehca_qp_state) {
+       case EHCA_QPS_RESET:
+               return IB_QPS_RESET;
+       case EHCA_QPS_INIT:
+               return IB_QPS_INIT;
+       case EHCA_QPS_RTR:
+               return IB_QPS_RTR;
+       case EHCA_QPS_RTS:
+               return IB_QPS_RTS;
+       case EHCA_QPS_SQD:
+               return IB_QPS_SQD;
+       case EHCA_QPS_SQE:
+               return IB_QPS_SQE;
+       case EHCA_QPS_ERR:
+               return IB_QPS_ERR;
+       default:
+               ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
+               return -EINVAL;
+       }
+}
+
+/*
+ * ehca_qp_type used as index for req_attr and opt_attr of
+ * struct ehca_modqp_statetrans
+ */
+enum ehca_qp_type {
+       QPT_RC = 0,
+       QPT_UC = 1,
+       QPT_UD = 2,
+       QPT_SQP = 3,
+       QPT_MAX
+};
+
+/*
+ * ib2ehcaqptype maps Ib to ehca qp_type
+ * returns ehca qp type corresponding to ib qp type
+ */
+static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype)
+{
+       switch (ibqptype) {
+       case IB_QPT_SMI:
+       case IB_QPT_GSI:
+               return QPT_SQP;
+       case IB_QPT_RC:
+               return QPT_RC;
+       case IB_QPT_UC:
+               return QPT_UC;
+       case IB_QPT_UD:
+               return QPT_UD;
+       default:
+               ehca_gen_err("Invalid ibqptype=%x", ibqptype);
+               return -EINVAL;
+       }
+}
+
+static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
+                                                        int ib_tostate)
+{
+       int index = -EINVAL;
+       switch (ib_tostate) {
+       case IB_QPS_RESET:
+               index = IB_QPST_ANY2RESET;
+               break;
+       case IB_QPS_INIT:
+               switch (ib_fromstate) {
+               case IB_QPS_RESET:
+                       index = IB_QPST_RESET2INIT;
+                       break;
+               case IB_QPS_INIT:
+                       index = IB_QPST_INIT2INIT;
+                       break;
+               }
+               break;
+       case IB_QPS_RTR:
+               if (ib_fromstate == IB_QPS_INIT)
+                       index = IB_QPST_INIT2RTR;
+               break;
+       case IB_QPS_RTS:
+               switch (ib_fromstate) {
+               case IB_QPS_RTR:
+                       index = IB_QPST_RTR2RTS;
+                       break;
+               case IB_QPS_RTS:
+                       index = IB_QPST_RTS2RTS;
+                       break;
+               case IB_QPS_SQD:
+                       index = IB_QPST_SQD2RTS;
+                       break;
+               case IB_QPS_SQE:
+                       index = IB_QPST_SQE2RTS;
+                       break;
+               }
+               break;
+       case IB_QPS_SQD:
+               if (ib_fromstate == IB_QPS_RTS)
+                       index = IB_QPST_RTS2SQD;
+               break;
+       case IB_QPS_SQE:
+               break;
+       case IB_QPS_ERR:
+               index = IB_QPST_ANY2ERR;
+               break;
+       default:
+               break;
+       }
+       return index;
+}
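+
+/*
+ * Example (from the table above): get_modqp_statetrans(IB_QPS_INIT,
+ * IB_QPS_RTR) returns IB_QPST_INIT2RTR, while an unsupported pair such
+ * as (IB_QPS_RTR, IB_QPS_SQD) falls through and returns -EINVAL.
+ */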
+
+enum ehca_service_type {
+       ST_RC = 0,
+       ST_UC = 1,
+       ST_RD = 2,
+       ST_UD = 3
+};
+
+/*
+ * ibqptype2servicetype returns hcp service type corresponding to given
+ * ib qp type used by create_qp()
+ */
+static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
+{
+       switch (ibqptype) {
+       case IB_QPT_SMI:
+       case IB_QPT_GSI:
+               return ST_UD;
+       case IB_QPT_RC:
+               return ST_RC;
+       case IB_QPT_UC:
+               return ST_UC;
+       case IB_QPT_UD:
+               return ST_UD;
+       case IB_QPT_RAW_IPV6:
+       case IB_QPT_RAW_ETY:
+               return -EINVAL;
+       default:
+               ehca_gen_err("Invalid ibqptype=%x", ibqptype);
+               return -EINVAL;
+       }
+}
+
+/*
+ * init_qp_queues initializes/constructs r/squeue and registers queue pages.
+ */
+static inline int init_qp_queues(struct ehca_shca *shca,
+                                struct ehca_qp *my_qp,
+                                int nr_sq_pages,
+                                int nr_rq_pages,
+                                int swqe_size,
+                                int rwqe_size,
+                                int nr_send_sges, int nr_receive_sges)
+{
+       int ret, cnt, ipz_rc;
+       void *vpage;
+       u64 rpage, h_ret;
+       struct ib_device *ib_dev = &shca->ib_device;
+       struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;
+
+       ipz_rc = ipz_queue_ctor(&my_qp->ipz_squeue,
+                               nr_sq_pages,
+                               EHCA_PAGESIZE, swqe_size, nr_send_sges);
+       if (!ipz_rc) {
+               ehca_err(ib_dev, "Cannot allocate page for squeue. ipz_rc=%x",
+                        ipz_rc);
+               return -EBUSY;
+       }
+
+       ipz_rc = ipz_queue_ctor(&my_qp->ipz_rqueue,
+                               nr_rq_pages,
+                               EHCA_PAGESIZE, rwqe_size, nr_receive_sges);
+       if (!ipz_rc) {
+               ehca_err(ib_dev, "Cannot allocate page for rqueue. ipz_rc=%x",
+                        ipz_rc);
+               ret = -EBUSY;
+               goto init_qp_queues0;
+       }
+       /* register SQ pages */
+       for (cnt = 0; cnt < nr_sq_pages; cnt++) {
+               vpage = ipz_qpageit_get_inc(&my_qp->ipz_squeue);
+               if (!vpage) {
+                       ehca_err(ib_dev, "SQ ipz_qpageit_get_inc() "
+                                "failed p_vpage= %p", vpage);
+                       ret = -EINVAL;
+                       goto init_qp_queues1;
+               }
+               rpage = virt_to_abs(vpage);
+
+               h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
+                                                my_qp->ipz_qp_handle,
+                                                &my_qp->pf, 0, 0,
+                                                rpage, 1,
+                                                my_qp->galpas.kernel);
+               if (h_ret < H_SUCCESS) {
+                       ehca_err(ib_dev, "SQ hipz_qp_register_rpage()"
+                                " failed rc=%lx", h_ret);
+                       ret = ehca2ib_return_code(h_ret);
+                       goto init_qp_queues1;
+               }
+       }
+
+       ipz_qeit_reset(&my_qp->ipz_squeue);
+
+       /* register RQ pages */
+       for (cnt = 0; cnt < nr_rq_pages; cnt++) {
+               vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
+               if (!vpage) {
+                       ehca_err(ib_dev, "RQ ipz_qpageit_get_inc() "
+                                "failed p_vpage = %p", vpage);
+                       ret = -EINVAL;
+                       goto init_qp_queues1;
+               }
+
+               rpage = virt_to_abs(vpage);
+
+               h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
+                                                my_qp->ipz_qp_handle,
+                                                &my_qp->pf, 0, 1,
+                                                rpage, 1, my_qp->galpas.kernel);
+               if (h_ret < H_SUCCESS) {
+                       ehca_err(ib_dev, "RQ hipz_qp_register_rpage() failed "
+                                "rc=%lx", h_ret);
+                       ret = ehca2ib_return_code(h_ret);
+                       goto init_qp_queues1;
+               }
+               if (cnt == (nr_rq_pages - 1)) { /* last page! */
+                       if (h_ret != H_SUCCESS) {
+                               ehca_err(ib_dev, "RQ hipz_qp_register_rpage() "
+                                        "h_ret= %lx ", h_ret);
+                               ret = ehca2ib_return_code(h_ret);
+                               goto init_qp_queues1;
+                       }
+                       vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
+                       if (vpage) {
+                               ehca_err(ib_dev, "ipz_qpageit_get_inc() "
+                                        "should not succeed vpage=%p", vpage);
+                               ret = -EINVAL;
+                               goto init_qp_queues1;
+                       }
+               } else {
+                       if (h_ret != H_PAGE_REGISTERED) {
+                               ehca_err(ib_dev, "RQ hipz_qp_register_rpage() "
+                                        "h_ret= %lx ", h_ret);
+                               ret = ehca2ib_return_code(h_ret);
+                               goto init_qp_queues1;
+                       }
+               }
+       }
+
+       ipz_qeit_reset(&my_qp->ipz_rqueue);
+
+       return 0;
+
+init_qp_queues1:
+       ipz_queue_dtor(&my_qp->ipz_rqueue);
+init_qp_queues0:
+       ipz_queue_dtor(&my_qp->ipz_squeue);
+       return ret;
+}
+
+struct ib_qp *ehca_create_qp(struct ib_pd *pd,
+                            struct ib_qp_init_attr *init_attr,
+                            struct ib_udata *udata)
+{
+       static int da_rc_msg_size[] = { 128, 256, 512, 1024, 2048, 4096 };
+       static int da_ud_sq_msg_size[] = { 128, 384, 896, 1920, 3968 };
+       struct ehca_qp *my_qp;
+       struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
+       struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
+                                             ib_device);
+       struct ib_ucontext *context = NULL;
+       u64 h_ret;
+       int max_send_sge, max_recv_sge, ret;
+
+       /* h_call's out parameters */
+       struct ehca_alloc_qp_parms parms;
+       u32 swqe_size = 0, rwqe_size = 0;
+       u8 daqp_completion, isdaqp;
+       unsigned long flags;
+
+       if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
+               init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
+               ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed",
+                        init_attr->sq_sig_type);
+               return ERR_PTR(-EINVAL);
+       }
+
+       /* save daqp completion bits */
+       daqp_completion = init_attr->qp_type & 0x60;
+       /* save daqp bit */
+       isdaqp = (init_attr->qp_type & 0x80) ? 1 : 0;
+       init_attr->qp_type = init_attr->qp_type & 0x1F;
+
+       if (init_attr->qp_type != IB_QPT_UD &&
+           init_attr->qp_type != IB_QPT_SMI &&
+           init_attr->qp_type != IB_QPT_GSI &&
+           init_attr->qp_type != IB_QPT_UC &&
+           init_attr->qp_type != IB_QPT_RC) {
+               ehca_err(pd->device, "wrong QP Type=%x", init_attr->qp_type);
+               return ERR_PTR(-EINVAL);
+       }
+       if ((init_attr->qp_type != IB_QPT_RC && init_attr->qp_type != IB_QPT_UD)
+           && isdaqp) {
+               ehca_err(pd->device, "unsupported LL QP Type=%x",
+                        init_attr->qp_type);
+               return ERR_PTR(-EINVAL);
+       } else if (init_attr->qp_type == IB_QPT_RC && isdaqp &&
+                  (init_attr->cap.max_send_wr > 255 ||
+                   init_attr->cap.max_recv_wr > 255)) {
+               ehca_err(pd->device, "Invalid number of max_send_wr=%x "
+                        "or max_recv_wr=%x for QP Type=%x",
+                        init_attr->cap.max_send_wr,
+                        init_attr->cap.max_recv_wr, init_attr->qp_type);
+               return ERR_PTR(-EINVAL);
+       } else if (init_attr->qp_type == IB_QPT_UD && isdaqp &&
+                  init_attr->cap.max_send_wr > 255) {
+               ehca_err(pd->device,
+                        "Invalid number of max_send_wr=%x for UD QP_TYPE=%x",
+                        init_attr->cap.max_send_wr, init_attr->qp_type);
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (pd->uobject && udata)
+               context = pd->uobject->context;
+
+       my_qp = kmem_cache_alloc(qp_cache, SLAB_KERNEL);
+       if (!my_qp) {
+               ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       memset(my_qp, 0, sizeof(struct ehca_qp));
+       memset(&parms, 0, sizeof(struct ehca_alloc_qp_parms));
+       spin_lock_init(&my_qp->spinlock_s);
+       spin_lock_init(&my_qp->spinlock_r);
+
+       my_qp->recv_cq =
+               container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
+       my_qp->send_cq =
+               container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
+
+       my_qp->init_attr = *init_attr;
+
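+       /*
+        * reserve an idr token for this QP; idr_get_new() can return
+        * -EAGAIN once the memory preallocated by idr_pre_get() has been
+        * consumed, hence the retry loop
+        */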
+       do {
+               if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
+                       ret = -ENOMEM;
+                       ehca_err(pd->device, "Can't reserve idr resources.");
+                       goto create_qp_exit0;
+               }
+
+               spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+               ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
+               spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+
+       } while (ret == -EAGAIN);
+
+       if (ret) {
+               ret = -ENOMEM;
+               ehca_err(pd->device, "Can't allocate new idr entry.");
+               goto create_qp_exit0;
+       }
+
+       parms.servicetype = ibqptype2servicetype(init_attr->qp_type);
+       if (parms.servicetype < 0) {
+               ret = -EINVAL;
+               ehca_err(pd->device, "Invalid qp_type=%x", init_attr->qp_type);
+               goto create_qp_exit0;
+       }
+
+       if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+               parms.sigtype = HCALL_SIGT_EVERY;
+       else
+               parms.sigtype = HCALL_SIGT_BY_WQE;
+
+       /* UD_AV CIRCUMVENTION */
+       max_send_sge = init_attr->cap.max_send_sge;
+       max_recv_sge = init_attr->cap.max_recv_sge;
+       if (IB_QPT_UD == init_attr->qp_type ||
+           IB_QPT_GSI == init_attr->qp_type ||
+           IB_QPT_SMI == init_attr->qp_type) {
+               max_send_sge += 2;
+               max_recv_sge += 2;
+       }
+
+       parms.ipz_eq_handle = shca->eq.ipz_eq_handle;
+       parms.daqp_ctrl = isdaqp | daqp_completion;
+       parms.pd = my_pd->fw_pd;
+       parms.max_recv_sge = max_recv_sge;
+       parms.max_send_sge = max_send_sge;
+
+       h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, my_qp, &parms);
+
+       if (h_ret != H_SUCCESS) {
+               ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lx",
+                        h_ret);
+               ret = ehca2ib_return_code(h_ret);
+               goto create_qp_exit1;
+       }
+
+       switch (init_attr->qp_type) {
+       case IB_QPT_RC:
+               if (isdaqp == 0) {
+                       swqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
+                                            (parms.act_nr_send_sges)]);
+                       rwqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
+                                            (parms.act_nr_recv_sges)]);
+               } else { /* for daqp we need to use msg size, not wqe size */
+                       swqe_size = da_rc_msg_size[max_send_sge];
+                       rwqe_size = da_rc_msg_size[max_recv_sge];
+                       parms.act_nr_send_sges = 1;
+                       parms.act_nr_recv_sges = 1;
+               }
+               break;
+       case IB_QPT_UC:
+               swqe_size = offsetof(struct ehca_wqe,
+                                    u.nud.sg_list[parms.act_nr_send_sges]);
+               rwqe_size = offsetof(struct ehca_wqe,
+                                    u.nud.sg_list[parms.act_nr_recv_sges]);
+               break;
+
+       case IB_QPT_UD:
+       case IB_QPT_GSI:
+       case IB_QPT_SMI:
+               /* UD circumvention */
+               parms.act_nr_recv_sges -= 2;
+               parms.act_nr_send_sges -= 2;
+               if (isdaqp) {
+                       swqe_size = da_ud_sq_msg_size[max_send_sge];
+                       rwqe_size = da_rc_msg_size[max_recv_sge];
+                       parms.act_nr_send_sges = 1;
+                       parms.act_nr_recv_sges = 1;
+               } else {
+                       swqe_size = offsetof(struct ehca_wqe,
+                                            u.ud_av.sg_list[parms.act_nr_send_sges]);
+                       rwqe_size = offsetof(struct ehca_wqe,
+                                            u.ud_av.sg_list[parms.act_nr_recv_sges]);
+               }
+
+               if (IB_QPT_GSI == init_attr->qp_type ||
+                   IB_QPT_SMI == init_attr->qp_type) {
+                       parms.act_nr_send_wqes = init_attr->cap.max_send_wr;
+                       parms.act_nr_recv_wqes = init_attr->cap.max_recv_wr;
+                       parms.act_nr_send_sges = init_attr->cap.max_send_sge;
+                       parms.act_nr_recv_sges = init_attr->cap.max_recv_sge;
+                       my_qp->real_qp_num =
+                               (init_attr->qp_type == IB_QPT_SMI) ? 0 : 1;
+               }
+
+               break;
+
+       default:
+               break;
+       }
+
+       /* initializes r/squeue and registers queue pages */
+       ret = init_qp_queues(shca, my_qp,
+                            parms.nr_sq_pages, parms.nr_rq_pages,
+                            swqe_size, rwqe_size,
+                            parms.act_nr_send_sges, parms.act_nr_recv_sges);
+       if (ret) {
+               ehca_err(pd->device,
+                        "Couldn't initialize r/squeue and pages ret=%x", ret);
+               goto create_qp_exit2;
+       }
+
+       my_qp->ib_qp.pd = &my_pd->ib_pd;
+       my_qp->ib_qp.device = my_pd->ib_pd.device;
+
+       my_qp->ib_qp.recv_cq = init_attr->recv_cq;
+       my_qp->ib_qp.send_cq = init_attr->send_cq;
+
+       my_qp->ib_qp.qp_num = my_qp->real_qp_num;
+       my_qp->ib_qp.qp_type = init_attr->qp_type;
+
+       my_qp->qp_type = init_attr->qp_type;
+       my_qp->ib_qp.srq = init_attr->srq;
+
+       my_qp->ib_qp.qp_context = init_attr->qp_context;
+       my_qp->ib_qp.event_handler = init_attr->event_handler;
+
+       init_attr->cap.max_inline_data = 0; /* not supported yet */
+       init_attr->cap.max_recv_sge = parms.act_nr_recv_sges;
+       init_attr->cap.max_recv_wr = parms.act_nr_recv_wqes;
+       init_attr->cap.max_send_sge = parms.act_nr_send_sges;
+       init_attr->cap.max_send_wr = parms.act_nr_send_wqes;
+
+       /* NOTE: define_aqp0() not supported yet */
+       if (init_attr->qp_type == IB_QPT_GSI) {
+               h_ret = ehca_define_sqp(shca, my_qp, init_attr);
+               if (h_ret != H_SUCCESS) {
+                       ehca_err(pd->device, "ehca_define_sqp() failed rc=%lx",
+                                h_ret);
+                       ret = ehca2ib_return_code(h_ret);
+                       goto create_qp_exit3;
+               }
+       }
+       if (init_attr->send_cq) {
+               struct ehca_cq *cq = container_of(init_attr->send_cq,
+                                                 struct ehca_cq, ib_cq);
+               ret = ehca_cq_assign_qp(cq, my_qp);
+               if (ret) {
+                       ehca_err(pd->device, "Couldn't assign qp to send_cq ret=%x",
+                                ret);
+                       goto create_qp_exit3;
+               }
+               my_qp->send_cq = cq;
+       }
+       /* copy queues, galpa data to user space */
+       if (context && udata) {
+               struct ipz_queue *ipz_rqueue = &my_qp->ipz_rqueue;
+               struct ipz_queue *ipz_squeue = &my_qp->ipz_squeue;
+               struct ehca_create_qp_resp resp;
+               struct vm_area_struct *vma;
+               memset(&resp, 0, sizeof(resp));
+
+               resp.qp_num = my_qp->real_qp_num;
+               resp.token = my_qp->token;
+               resp.qp_type = my_qp->qp_type;
+               resp.qkey = my_qp->qkey;
+               resp.real_qp_num = my_qp->real_qp_num;
+               /* rqueue properties */
+               resp.ipz_rqueue.qe_size = ipz_rqueue->qe_size;
+               resp.ipz_rqueue.act_nr_of_sg = ipz_rqueue->act_nr_of_sg;
+               resp.ipz_rqueue.queue_length = ipz_rqueue->queue_length;
+               resp.ipz_rqueue.pagesize = ipz_rqueue->pagesize;
+               resp.ipz_rqueue.toggle_state = ipz_rqueue->toggle_state;
+               ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x22000000,
+                                      ipz_rqueue->queue_length,
+                                      (void**)&resp.ipz_rqueue.queue,
+                                      &vma);
+               if (ret) {
+                       ehca_err(pd->device, "Could not mmap rqueue pages");
+                       goto create_qp_exit3;
+               }
+               my_qp->uspace_rqueue = resp.ipz_rqueue.queue;
+               /* squeue properties */
+               resp.ipz_squeue.qe_size = ipz_squeue->qe_size;
+               resp.ipz_squeue.act_nr_of_sg = ipz_squeue->act_nr_of_sg;
+               resp.ipz_squeue.queue_length = ipz_squeue->queue_length;
+               resp.ipz_squeue.pagesize = ipz_squeue->pagesize;
+               resp.ipz_squeue.toggle_state = ipz_squeue->toggle_state;
+               ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x23000000,
+                                      ipz_squeue->queue_length,
+                                      (void**)&resp.ipz_squeue.queue,
+                                      &vma);
+               if (ret) {
+                       ehca_err(pd->device, "Could not mmap squeue pages");
+                       goto create_qp_exit4;
+               }
+               my_qp->uspace_squeue = resp.ipz_squeue.queue;
+               /* fw_handle */
+               resp.galpas = my_qp->galpas;
+               ret = ehca_mmap_register(my_qp->galpas.user.fw_handle,
+                                        (void**)&resp.galpas.kernel.fw_handle,
+                                        &vma);
+               if (ret) {
+                       ehca_err(pd->device, "Could not mmap fw_handle");
+                       goto create_qp_exit5;
+               }
+               my_qp->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
+
+               if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
+                       ehca_err(pd->device, "Copy to udata failed");
+                       ret = -EINVAL;
+                       goto create_qp_exit6;
+               }
+       }
+
+       return &my_qp->ib_qp;
+
+create_qp_exit6:
+       ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
+
+create_qp_exit5:
+       ehca_munmap(my_qp->uspace_squeue, my_qp->ipz_squeue.queue_length);
+
+create_qp_exit4:
+       ehca_munmap(my_qp->uspace_rqueue, my_qp->ipz_rqueue.queue_length);
+
+create_qp_exit3:
+       ipz_queue_dtor(&my_qp->ipz_rqueue);
+       ipz_queue_dtor(&my_qp->ipz_squeue);
+
+create_qp_exit2:
+       hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
+
+create_qp_exit1:
+       spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+       idr_remove(&ehca_qp_idr, my_qp->token);
+       spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+
+create_qp_exit0:
+       kmem_cache_free(qp_cache, my_qp);
+       return ERR_PTR(ret);
+}
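+
+/*
+ * Note: the create_qp_exit* labels above unwind strictly in reverse
+ * order of acquisition (udata mmaps, queues, firmware QP, idr token,
+ * cache object), each label falling through to all earlier cleanups.
+ */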
+
+/*
+ * prepare_sqe_rts called by internal_modify_qp() at trans sqe -> rts
+ * set purge bit of bad wqe and subsequent wqes to avoid reentering sqe
+ * returns total number of bad wqes in bad_wqe_cnt
+ */
+static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
+                          int *bad_wqe_cnt)
+{
+       u64 h_ret;
+       struct ipz_queue *squeue;
+       void *bad_send_wqe_p, *bad_send_wqe_v;
+       void *squeue_start_p, *squeue_end_p;
+       void *squeue_start_v, *squeue_end_v;
+       struct ehca_wqe *wqe;
+       int qp_num = my_qp->ib_qp.qp_num;
+
+       /* get send wqe pointer */
+       h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
+                                          my_qp->ipz_qp_handle, &my_qp->pf,
+                                          &bad_send_wqe_p, NULL, 2);
+       if (h_ret != H_SUCCESS) {
+               ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
+                        " ehca_qp=%p qp_num=%x h_ret=%lx",
+                        my_qp, qp_num, h_ret);
+               return ehca2ib_return_code(h_ret);
+       }
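+       /*
+        * bit 63 of the returned wqe pointer is presumably a firmware
+        * status flag, not part of the address; mask it off
+        */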
+       bad_send_wqe_p = (void*)((u64)bad_send_wqe_p & (~(1L<<63)));
+       ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
+                qp_num, bad_send_wqe_p);
+       /* convert wqe pointer to virtual address */
+       bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
+       if (ehca_debug_level)
+               ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
+       squeue = &my_qp->ipz_squeue;
+       squeue_start_p = (void*)virt_to_abs(ipz_qeit_calc(squeue, 0L));
+       squeue_end_p = squeue_start_p + squeue->queue_length;
+       squeue_start_v = abs_to_virt((u64)squeue_start_p);
+       squeue_end_v = abs_to_virt((u64)squeue_end_p);
+       ehca_dbg(&shca->ib_device, "qp_num=%x squeue_start_v=%p squeue_end_v=%p",
+                qp_num, squeue_start_v, squeue_end_v);
+
+       /* loop sets wqe's purge bit */
+       wqe = (struct ehca_wqe*)bad_send_wqe_v;
+       *bad_wqe_cnt = 0;
+       while (wqe->optype != 0xff && wqe->wqef != 0xff) {
+               if (ehca_debug_level)
+                       ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
+               wqe->nr_of_data_seg = 0; /* suppress data access */
+               wqe->wqef = WQEF_PURGE; /* WQE to be purged */
+               wqe = (struct ehca_wqe *)((u8 *)wqe + squeue->qe_size);
+               *bad_wqe_cnt = (*bad_wqe_cnt) + 1;
+               if ((void *)wqe >= squeue_end_v)
+                       wqe = squeue_start_v;
+       }
+       /*
+        * the bad wqe will be reprocessed and ignored when poll_cq() is
+        * called, i.e. the number of wqes with flush error status is one less
+        */
+       ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
+                qp_num, (*bad_wqe_cnt)-1);
+       wqe->wqef = 0;
+
+       return 0;
+}
+
+/*
+ * internal_modify_qp with circumvention to handle aqp0 properly
+ * smi_reset2init indicates if this is an internal reset-to-init call for
+ * an SMI QP. This flag must always be zero if called from ehca_modify_qp()!
+ * This internal function was introduced to avoid recursion of
+ * ehca_modify_qp()!
+ */
+static int internal_modify_qp(struct ib_qp *ibqp,
+                             struct ib_qp_attr *attr,
+                             int attr_mask, int smi_reset2init)
+{
+       enum ib_qp_state qp_cur_state, qp_new_state;
+       int cnt, qp_attr_idx, ret = 0;
+       enum ib_qp_statetrans statetrans;
+       struct hcp_modify_qp_control_block *mqpcb;
+       struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+       struct ehca_shca *shca =
+               container_of(ibqp->pd->device, struct ehca_shca, ib_device);
+       u64 update_mask;
+       u64 h_ret;
+       int bad_wqe_cnt = 0;
+       int squeue_locked = 0;
+       unsigned long spl_flags = 0;
+
+       /* do query_qp to obtain current attr values */
+       mqpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+       if (mqpcb == NULL) {
+               ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
+                        "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
+               return -ENOMEM;
+       }
+
+       h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
+                               my_qp->ipz_qp_handle,
+                               &my_qp->pf,
+                               mqpcb, my_qp->galpas.kernel);
+       if (h_ret != H_SUCCESS) {
+               ehca_err(ibqp->device, "hipz_h_query_qp() failed "
+                        "ehca_qp=%p qp_num=%x h_ret=%lx",
+                        my_qp, ibqp->qp_num, h_ret);
+               ret = ehca2ib_return_code(h_ret);
+               goto modify_qp_exit1;
+       }
+
+       qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
+
+       if (qp_cur_state == -EINVAL) {  /* invalid qp state */
+               ret = -EINVAL;
+               ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
+                        "ehca_qp=%p qp_num=%x",
+                        mqpcb->qp_state, my_qp, ibqp->qp_num);
+               goto modify_qp_exit1;
+       }
+       /*
+        * circumvention to set aqp0 initial state to init
+        * as expected by IB spec
+        */
+       if (smi_reset2init == 0 &&
+           ibqp->qp_type == IB_QPT_SMI &&
+           qp_cur_state == IB_QPS_RESET &&
+           (attr_mask & IB_QP_STATE) &&
+           attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */
+               struct ib_qp_attr smiqp_attr = {
+                       .qp_state = IB_QPS_INIT,
+                       .port_num = my_qp->init_attr.port_num,
+                       .pkey_index = 0,
+                       .qkey = 0
+               };
+               int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
+                       IB_QP_PKEY_INDEX | IB_QP_QKEY;
+               int smirc = internal_modify_qp(
+                       ibqp, &smiqp_attr, smiqp_attr_mask, 1);
+               if (smirc) {
+                       ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
+                                "ehca_modify_qp() rc=%x", smirc);
+                       ret = -EINVAL;
+                       goto modify_qp_exit1;
+               }
+               qp_cur_state = IB_QPS_INIT;
+               ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
+       }
+       /* does the transmitted current state match the actual current state? */
+       if ((attr_mask & IB_QP_CUR_STATE) &&
+           qp_cur_state != attr->cur_qp_state) {
+               ret = -EINVAL;
+               ehca_err(ibqp->device,
+                        "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
+                        " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
+                        attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num);
+               goto modify_qp_exit1;
+       }
+
+       ehca_dbg(ibqp->device,"ehca_qp=%p qp_num=%x current qp_state=%x "
+                "new qp_state=%x attribute_mask=%x",
+                my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);
+
+       qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
+       if (!smi_reset2init &&
+           !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
+                               attr_mask)) {
+               ret = -EINVAL;
+               ehca_err(ibqp->device,
+                        "Invalid qp transition new_state=%x cur_state=%x "
+                        "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
+                        qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
+               goto modify_qp_exit1;
+       }
+
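+       /* note: the assignment inside the following condition is intentional */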
+       if ((mqpcb->qp_state = ib2ehca_qp_state(qp_new_state)))
+               update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
+       else {
+               ret = -EINVAL;
+               ehca_err(ibqp->device, "Invalid new qp state=%x "
+                        "ehca_qp=%p qp_num=%x",
+                        qp_new_state, my_qp, ibqp->qp_num);
+               goto modify_qp_exit1;
+       }
+
+       /* retrieve state transition struct to get req and opt attrs */
+       statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
+       if (statetrans < 0) {
+               ret = -EINVAL;
+               ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x "
+                        "new_qp_state=%x State_xsition=%x ehca_qp=%p "
+                        "qp_num=%x", qp_cur_state, qp_new_state,
+                        statetrans, my_qp, ibqp->qp_num);
+               goto modify_qp_exit1;
+       }
+
+       qp_attr_idx = ib2ehcaqptype(ibqp->qp_type);
+
+       if (qp_attr_idx < 0) {
+               ret = qp_attr_idx;
+               ehca_err(ibqp->device,
+                        "Invalid QP type=%x ehca_qp=%p qp_num=%x",
+                        ibqp->qp_type, my_qp, ibqp->qp_num);
+               goto modify_qp_exit1;
+       }
+
+       ehca_dbg(ibqp->device,
+                "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
+                my_qp, ibqp->qp_num, statetrans);
+
+       /* sqe -> rts: set purge bit of bad wqe before actual trans */
+       if ((my_qp->qp_type == IB_QPT_UD ||
+            my_qp->qp_type == IB_QPT_GSI ||
+            my_qp->qp_type == IB_QPT_SMI) &&
+           statetrans == IB_QPST_SQE2RTS) {
+               /* mark next free wqe if this is a kernel-space queue */
+               if (my_qp->uspace_squeue == 0) {
+                       struct ehca_wqe *wqe;
+                       /* lock send queue */
+                       spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
+                       squeue_locked = 1;
+                       /* mark next free wqe */
+                       wqe = (struct ehca_wqe*)
+                               ipz_qeit_get(&my_qp->ipz_squeue);
+                       wqe->optype = wqe->wqef = 0xff;
+                       ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
+                                ibqp->qp_num, wqe);
+               }
+               ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
+               if (ret) {
+                       ehca_err(ibqp->device, "prepare_sqe_rts() failed "
+                                "ehca_qp=%p qp_num=%x ret=%x",
+                                my_qp, ibqp->qp_num, ret);
+                       goto modify_qp_exit2;
+               }
+       }
+
+       /*
+        * enable RDMA_Atomic_Control if reset->init and reliable connection;
+        * this is necessary since gen2 does not provide that flag,
+        * but pHyp requires it
+        */
+       if (statetrans == IB_QPST_RESET2INIT &&
+           (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) {
+               mqpcb->rdma_atomic_ctrl = 3;
+               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1);
+       }
+       /* circumvention: pHyp needs #RDMA/Atomic Resp Res for UC INIT -> RTR */
+       if (statetrans == IB_QPST_INIT2RTR &&
+           (ibqp->qp_type == IB_QPT_UC) &&
+           !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) {
+               mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */
+               update_mask |=
+                       EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
+       }
+
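+       /*
+        * for each attribute below, the new value is written into the mqpcb
+        * and the matching MQPCB_MASK_* bit is set in update_mask so that
+        * hipz_h_modify_qp() applies only the selected fields
+        */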
+       if (attr_mask & IB_QP_PKEY_INDEX) {
+               mqpcb->prim_p_key_idx = attr->pkey_index;
+               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
+       }
+       if (attr_mask & IB_QP_PORT) {
+               if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
+                       ret = -EINVAL;
+                       ehca_err(ibqp->device, "Invalid port=%x. "
+                                "ehca_qp=%p qp_num=%x num_ports=%x",
+                                attr->port_num, my_qp, ibqp->qp_num,
+                                shca->num_ports);
+                       goto modify_qp_exit2;
+               }
+               mqpcb->prim_phys_port = attr->port_num;
+               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
+       }
+       if (attr_mask & IB_QP_QKEY) {
+               mqpcb->qkey = attr->qkey;
+               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
+       }
+       if (attr_mask & IB_QP_AV) {
+               int ah_mult = ib_rate_to_mult(attr->ah_attr.static_rate);
+               int ehca_mult = ib_rate_to_mult(shca->sport[my_qp->
+                                               init_attr.port_num].rate);
+
+               mqpcb->dlid = attr->ah_attr.dlid;
+               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
+               mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
+               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1);
+               mqpcb->service_level = attr->ah_attr.sl;
+               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);
+
+               if (ah_mult < ehca_mult)
+                       mqpcb->max_static_rate = (ah_mult > 0) ?
+                       ((ehca_mult - 1) / ah_mult) : 0;
+               else
+                       mqpcb->max_static_rate = 0;
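+               /*
+                * e.g. a 1x AH (ah_mult == 1) on a 4x port (ehca_mult == 4)
+                * yields 3, which matches the IB inter-packet delay (IPD)
+                * encoding; an AH at or above the port rate needs no delay
+                */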
+
+               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);
+
+               /*
+                * SOURCE_GID_IDX and DEST_GID may only be set if GRH is
+                * enabled; otherwise pHyp will return H_ATTR_PARM!
+                */
+               if (attr->ah_attr.ah_flags == IB_AH_GRH) {
+                       mqpcb->send_grh_flag = 1 << 31;
+                       update_mask |=
+                               EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
+                       mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
+                       update_mask |=
+                               EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);
+
+                       for (cnt = 0; cnt < 16; cnt++)
+                               mqpcb->dest_gid.byte[cnt] =
+                                       attr->ah_attr.grh.dgid.raw[cnt];
+
+                       update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1);
+                       mqpcb->flow_label = attr->ah_attr.grh.flow_label;
+                       update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1);
+                       mqpcb->hop_limit = attr->ah_attr.grh.hop_limit;
+                       update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1);
+                       mqpcb->traffic_class = attr->ah_attr.grh.traffic_class;
+                       update_mask |=
+                               EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1);
+               }
+       }
+
+       if (attr_mask & IB_QP_PATH_MTU) {
+               mqpcb->path_mtu = attr->path_mtu;
+               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
+       }
+       if (attr_mask & IB_QP_TIMEOUT) {
+               mqpcb->timeout = attr->timeout;
+               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
+       }
+       if (attr_mask & IB_QP_RETRY_CNT) {
+               mqpcb->retry_count = attr->retry_cnt;
+               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
+       }
+       if (attr_mask & IB_QP_RNR_RETRY) {
+               mqpcb->rnr_retry_count = attr->rnr_retry;
+               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1);
+       }
+       if (attr_mask & IB_QP_RQ_PSN) {
+               mqpcb->receive_psn = attr->rq_psn;
+               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
+       }
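+       /*
+        * the values below are clamped to 2, apparently the adapter's limit
+        * for outstanding RDMA read/atomic operations
+        */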
+       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+               mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ?
+                       attr->max_dest_rd_atomic : 2;
+               update_mask |=
+                       EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
+       }
+       if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+               mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ?
+                       attr->max_rd_atomic : 2;
+               update_mask |=
+                       EHCA_BMASK_SET
+                       (MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
+       }
+       if (attr_mask & IB_QP_ALT_PATH) {
+               int ah_mult = ib_rate_to_mult(attr->alt_ah_attr.static_rate);
+               int ehca_mult = ib_rate_to_mult(
+                       shca->sport[my_qp->init_attr.port_num].rate);
+
+               mqpcb->dlid_al = attr->alt_ah_attr.dlid;
+               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1);
+               mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
+               update_mask |=
+                       EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1);
+               mqpcb->service_level_al = attr->alt_ah_attr.sl;
+               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1);
+
+               if (ah_mult < ehca_mult)
+                       mqpcb->max_static_rate_al = (ah_mult > 0) ?
+                       ((ehca_mult - 1) / ah_mult) : 0;
+               else
+                       mqpcb->max_static_rate_al = 0;
+
+               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1);
+
+               /*
+                * SOURCE_GID_IDX and DEST_GID may only be set if GRH is
+                * enabled; otherwise pHyp will return H_ATTR_PARM!
+                */
+               if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
+                       mqpcb->send_grh_flag_al = 1 << 31;
+                       update_mask |=
+                               EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);
+                       mqpcb->source_gid_idx_al =
+                               attr->alt_ah_attr.grh.sgid_index;
+                       update_mask |=
+                               EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1);
+
+                       for (cnt = 0; cnt < 16; cnt++)
+                               mqpcb->dest_gid_al.byte[cnt] =
+                                       attr->alt_ah_attr.grh.dgid.raw[cnt];
+
+                       update_mask |=
+                               EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1);
+                       mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label;
+                       update_mask |=
+                               EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1);
+                       mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit;
+                       update_mask |=
+                               EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1);
+                       mqpcb->traffic_class_al =
+                               attr->alt_ah_attr.grh.traffic_class;
+                       update_mask |=
+                               EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
+               }
+       }
+
+       if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+               mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
+               update_mask |=
+                       EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1);
+       }
+
+       if (attr_mask & IB_QP_SQ_PSN) {
+               mqpcb->send_psn = attr->sq_psn;
+               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
+       }
+
+       if (attr_mask & IB_QP_DEST_QPN) {
+               mqpcb->dest_qp_nr = attr->dest_qp_num;
+               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
+       }
+
+       if (attr_mask & IB_QP_PATH_MIG_STATE) {
+               mqpcb->path_migration_state = attr->path_mig_state;
+               update_mask |=
+                       EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
+       }
+
+       if (attr_mask & IB_QP_CAP) {
+               mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1;
+               update_mask |=
+                       EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1);
+               mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
+               update_mask |=
+                       EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1);
+               /* no support for max_send/recv_sge yet */
+       }
+
+       if (ehca_debug_level)
+               ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);
+
+       h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
+                                my_qp->ipz_qp_handle,
+                                &my_qp->pf,
+                                update_mask,
+                                mqpcb, my_qp->galpas.kernel);
+
+       if (h_ret != H_SUCCESS) {
+               ret = ehca2ib_return_code(h_ret);
+               ehca_err(ibqp->device, "hipz_h_modify_qp() failed rc=%lx "
+                        "ehca_qp=%p qp_num=%x",h_ret, my_qp, ibqp->qp_num);
+               goto modify_qp_exit2;
+       }
+
+       if ((my_qp->qp_type == IB_QPT_UD ||
+            my_qp->qp_type == IB_QPT_GSI ||
+            my_qp->qp_type == IB_QPT_SMI) &&
+           statetrans == IB_QPST_SQE2RTS) {
+               /* ring the doorbell to reprocess the wqes */
+               iosync(); /* serialize GAL register access */
+               hipz_update_sqa(my_qp, bad_wqe_cnt-1);
+               ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
+       }
+
+       if (statetrans == IB_QPST_RESET2INIT ||
+           statetrans == IB_QPST_INIT2INIT) {
+               mqpcb->qp_enable = 1;
+               mqpcb->qp_state = EHCA_QPS_INIT;
+               update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
+
+               h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
+                                        my_qp->ipz_qp_handle,
+                                        &my_qp->pf,
+                                        update_mask,
+                                        mqpcb,
+                                        my_qp->galpas.kernel);
+
+               if (h_ret != H_SUCCESS) {
+                       ret = ehca2ib_return_code(h_ret);
+                       ehca_err(ibqp->device, "ENABLE in context of "
+                                "RESET_2_INIT failed! Maybe you didn't get "
+                                "a LID h_ret=%lx ehca_qp=%p qp_num=%x",
+                                h_ret, my_qp, ibqp->qp_num);
+                       goto modify_qp_exit2;
+               }
+       }
+
+       if (statetrans == IB_QPST_ANY2RESET) {
+               ipz_qeit_reset(&my_qp->ipz_rqueue);
+               ipz_qeit_reset(&my_qp->ipz_squeue);
+       }
+
+       if (attr_mask & IB_QP_QKEY)
+               my_qp->qkey = attr->qkey;
+
+modify_qp_exit2:
+       if (squeue_locked) { /* this means: sqe -> rts */
+               spin_unlock_irqrestore(&my_qp->spinlock_s, spl_flags);
+               my_qp->sqerr_purgeflag = 1;
+       }
+
+modify_qp_exit1:
+       kfree(mqpcb);
+
+       return ret;
+}
+
+int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
+{
+       struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+       struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
+                                            ib_pd);
+       u32 cur_pid = current->tgid;
+
+       if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+           my_pd->ownpid != cur_pid) {
+               ehca_err(ibqp->pd->device, "Invalid caller pid=%x ownpid=%x",
+                        cur_pid, my_pd->ownpid);
+               return -EINVAL;
+       }
+
+       return internal_modify_qp(ibqp, attr, attr_mask, 0);
+}
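+
+/*
+ * Typical usage (a sketch of the generic IB verbs flow, not anything
+ * ehca-specific): a consumer brings an RC QP up by calling ib_modify_qp()
+ * three times: RESET -> INIT (state, pkey index, port, access flags),
+ * INIT -> RTR (AV, path MTU, dest QP number, RQ PSN, ...), and
+ * RTR -> RTS (timeout, retry counts, SQ PSN, ...); each call ends up here.
+ */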
+
+int ehca_query_qp(struct ib_qp *qp,
+                 struct ib_qp_attr *qp_attr,
+                 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+       struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
+       struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
+                                            ib_pd);
+       struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
+                                             ib_device);
+       struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
+       struct hcp_modify_qp_control_block *qpcb;
+       u32 cur_pid = current->tgid;
+       int cnt, ret = 0;
+       u64 h_ret;
+
+       if (my_pd->ib_pd.uobject  && my_pd->ib_pd.uobject->context  &&
+           my_pd->ownpid != cur_pid) {
+               ehca_err(qp->device, "Invalid caller pid=%x ownpid=%x",
+                        cur_pid, my_pd->ownpid);
+               return -EINVAL;
+       }
+
+       if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
+               ehca_err(qp->device,"Invalid attribute mask "
+                        "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
+                        my_qp, qp->qp_num, qp_attr_mask);
+               return -EINVAL;
+       }
+
+       qpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+       if (!qpcb) {
+               ehca_err(qp->device,"Out of memory for qpcb "
+                        "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
+               return -ENOMEM;
+       }
+
+       h_ret = hipz_h_query_qp(adapter_handle,
+                               my_qp->ipz_qp_handle,
+                               &my_qp->pf,
+                               qpcb, my_qp->galpas.kernel);
+
+       if (h_ret != H_SUCCESS) {
+               ret = ehca2ib_return_code(h_ret);
+               ehca_err(qp->device,"hipz_h_query_qp() failed "
+                        "ehca_qp=%p qp_num=%x h_ret=%lx",
+                        my_qp, qp->qp_num, h_ret);
+               goto query_qp_exit1;
+       }
+
+       qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
+       qp_attr->qp_state = qp_attr->cur_qp_state;
+
+       if (qp_attr->cur_qp_state == -EINVAL) {
+               ret = -EINVAL;
+               ehca_err(qp->device,"Got invalid ehca_qp_state=%x "
+                        "ehca_qp=%p qp_num=%x",
+                        qpcb->qp_state, my_qp, qp->qp_num);
+               goto query_qp_exit1;
+       }
+
+       if (qp_attr->qp_state == IB_QPS_SQD)
+               qp_attr->sq_draining = 1;
+
+       qp_attr->qkey = qpcb->qkey;
+       qp_attr->path_mtu = qpcb->path_mtu;
+       qp_attr->path_mig_state = qpcb->path_migration_state;
+       qp_attr->rq_psn = qpcb->receive_psn;
+       qp_attr->sq_psn = qpcb->send_psn;
+       qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
+       qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr-1;
+       qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr-1;
+       /* UD_AV CIRCUMVENTION */
+       if (my_qp->qp_type == IB_QPT_UD) {
+               qp_attr->cap.max_send_sge =
+                       qpcb->actual_nr_sges_in_sq_wqe - 2;
+               qp_attr->cap.max_recv_sge =
+                       qpcb->actual_nr_sges_in_rq_wqe - 2;
+       } else {
+               qp_attr->cap.max_send_sge =
+                       qpcb->actual_nr_sges_in_sq_wqe;
+               qp_attr->cap.max_recv_sge =
+                       qpcb->actual_nr_sges_in_rq_wqe;
+       }
+
+       qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
+       qp_attr->dest_qp_num = qpcb->dest_qp_nr;
+
+       qp_attr->pkey_index =
+               EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->prim_p_key_idx);
+
+       qp_attr->port_num =
+               EHCA_BMASK_GET(MQPCB_PRIM_PHYS_PORT, qpcb->prim_phys_port);
+
+       qp_attr->timeout = qpcb->timeout;
+       qp_attr->retry_cnt = qpcb->retry_count;
+       qp_attr->rnr_retry = qpcb->rnr_retry_count;
+
+       qp_attr->alt_pkey_index =
+               EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->alt_p_key_idx);
+
+       qp_attr->alt_port_num = qpcb->alt_phys_port;
+       qp_attr->alt_timeout = qpcb->timeout_al;
+
+       /* primary av */
+       qp_attr->ah_attr.sl = qpcb->service_level;
+
+       if (qpcb->send_grh_flag) {
+               qp_attr->ah_attr.ah_flags = IB_AH_GRH;
+       }
+
+       qp_attr->ah_attr.static_rate = qpcb->max_static_rate;
+       qp_attr->ah_attr.dlid = qpcb->dlid;
+       qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits;
+       qp_attr->ah_attr.port_num = qp_attr->port_num;
+
+       /* primary GRH */
+       qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class;
+       qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit;
+       qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx;
+       qp_attr->ah_attr.grh.flow_label = qpcb->flow_label;
+
+       for (cnt = 0; cnt < 16; cnt++)
+               qp_attr->ah_attr.grh.dgid.raw[cnt] =
+                       qpcb->dest_gid.byte[cnt];
+
+       /* alternate AV */
+       qp_attr->alt_ah_attr.sl = qpcb->service_level_al;
+       if (qpcb->send_grh_flag_al) {
+               qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH;
+       }
+
+       qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al;
+       qp_attr->alt_ah_attr.dlid = qpcb->dlid_al;
+       qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al;
+
+       /* alternate GRH */
+       qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al;
+       qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al;
+       qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al;
+       qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al;
+
+       for (cnt = 0; cnt < 16; cnt++)
+               qp_attr->alt_ah_attr.grh.dgid.raw[cnt] =
+                       qpcb->dest_gid_al.byte[cnt];
+
+       /* return init attributes given in ehca_create_qp */
+       if (qp_init_attr)
+               *qp_init_attr = my_qp->init_attr;
+
+       if (ehca_debug_level)
+               ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
+
+query_qp_exit1:
+       kfree(qpcb);
+
+       return ret;
+}
+
+int ehca_destroy_qp(struct ib_qp *ibqp)
+{
+       struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+       struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
+                                             ib_device);
+       struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
+                                            ib_pd);
+       u32 cur_pid = current->tgid;
+       u32 qp_num = ibqp->qp_num;
+       int ret;
+       u64 h_ret;
+       u8 port_num;
+       enum ib_qp_type qp_type;
+       unsigned long flags;
+
+       if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+           my_pd->ownpid != cur_pid) {
+               ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x",
+                        cur_pid, my_pd->ownpid);
+               return -EINVAL;
+       }
+
+       if (my_qp->send_cq) {
+               ret = ehca_cq_unassign_qp(my_qp->send_cq,
+                                             my_qp->real_qp_num);
+               if (ret) {
+                       ehca_err(ibqp->device, "Couldn't unassign qp from "
+                                "send_cq ret=%x qp_num=%x cq_num=%x", ret,
+                                my_qp->ib_qp.qp_num, my_qp->send_cq->cq_number);
+                       return ret;
+               }
+       }
+
+       spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+       idr_remove(&ehca_qp_idr, my_qp->token);
+       spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+
+       /* un-mmap if vma alloc */
+       if (my_qp->uspace_rqueue) {
+               ret = ehca_munmap(my_qp->uspace_rqueue,
+                                 my_qp->ipz_rqueue.queue_length);
+               if (ret)
+                       ehca_err(ibqp->device, "Could not munmap rqueue "
+                                "qp_num=%x", qp_num);
+               ret = ehca_munmap(my_qp->uspace_squeue,
+                                 my_qp->ipz_squeue.queue_length);
+               if (ret)
+                       ehca_err(ibqp->device, "Could not munmap squeue "
+                                "qp_num=%x", qp_num);
+               ret = ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
+               if (ret)
+                       ehca_err(ibqp->device, "Could not munmap fwh qp_num=%x",
+                                qp_num);
+       }
+
+       h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
+       if (h_ret != H_SUCCESS) {
+               ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx "
+                        "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
+               return ehca2ib_return_code(h_ret);
+       }
+
+       port_num = my_qp->init_attr.port_num;
+       qp_type  = my_qp->init_attr.qp_type;
+
+       /* no support for IB_QPT_SMI yet */
+       if (qp_type == IB_QPT_GSI) {
+               struct ib_event event;
+               ehca_info(ibqp->device, "device %s: port %x is inactive.",
+                         shca->ib_device.name, port_num);
+               event.device = &shca->ib_device;
+               event.event = IB_EVENT_PORT_ERR;
+               event.element.port_num = port_num;
+               shca->sport[port_num - 1].port_state = IB_PORT_DOWN;
+               ib_dispatch_event(&event);
+       }
+
+       ipz_queue_dtor(&my_qp->ipz_rqueue);
+       ipz_queue_dtor(&my_qp->ipz_squeue);
+       kmem_cache_free(qp_cache, my_qp);
+       return 0;
+}
+
+int ehca_init_qp_cache(void)
+{
+       qp_cache = kmem_cache_create("ehca_cache_qp",
+                                    sizeof(struct ehca_qp), 0,
+                                    SLAB_HWCACHE_ALIGN,
+                                    NULL, NULL);
+       if (!qp_cache)
+               return -ENOMEM;
+       return 0;
+}
+
+void ehca_cleanup_qp_cache(void)
+{
+       if (qp_cache)
+               kmem_cache_destroy(qp_cache);
+}
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
new file mode 100644 (file)
index 0000000..b46bda1
--- /dev/null
@@ -0,0 +1,653 @@
+/*
+ *  IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ *  post_send/recv, poll_cq, req_notify
+ *
+ *  Authors: Waleri Fomin <fomin@de.ibm.com>
+ *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *           Reinhard Ernst <rernst@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <asm-powerpc/system.h>
+#include "ehca_classes.h"
+#include "ehca_tools.h"
+#include "ehca_qes.h"
+#include "ehca_iverbs.h"
+#include "hcp_if.h"
+#include "hipz_fns.h"
+
+static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
+                                 struct ehca_wqe *wqe_p,
+                                 struct ib_recv_wr *recv_wr)
+{
+       u8 cnt_ds;
+       if (unlikely((recv_wr->num_sge < 0) ||
+                    (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
+               ehca_gen_err("Invalid number of WQE SGE. "
+                        "num_sqe=%x max_nr_of_sg=%x",
+                        recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
+               return -EINVAL; /* invalid SG list length */
+       }
+
+       /* clear wqe header until sglist */
+       memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
+
+       wqe_p->work_request_id = recv_wr->wr_id;
+       wqe_p->nr_of_data_seg = recv_wr->num_sge;
+
+       for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
+               wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
+                       recv_wr->sg_list[cnt_ds].addr;
+               wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
+                       recv_wr->sg_list[cnt_ds].lkey;
+               wqe_p->u.all_rcv.sg_list[cnt_ds].length =
+                       recv_wr->sg_list[cnt_ds].length;
+       }
+
+       if (ehca_debug_level) {
+               ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", ipz_rqueue);
+               ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
+       }
+
+       return 0;
+}
+
+#if defined(DEBUG_GSI_SEND_WR)
+
+/* need ib_mad struct */
+#include <rdma/ib_mad.h>
+
+static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
+{
+       int idx = 0;
+       int j;
+       while (send_wr) {
+               struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr;
+               struct ib_sge *sge = send_wr->sg_list;
+               ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x "
+                            "send_flags=%x opcode=%x",idx, send_wr->wr_id,
+                            send_wr->num_sge, send_wr->send_flags,
+                            send_wr->opcode);
+               if (mad_hdr) {
+                       ehca_gen_dbg("send_wr#%x mad_hdr base_version=%x "
+                                    "mgmt_class=%x class_version=%x method=%x "
+                                    "status=%x class_specific=%x tid=%lx "
+                                    "attr_id=%x resv=%x attr_mod=%x",
+                                    idx, mad_hdr->base_version,
+                                    mad_hdr->mgmt_class,
+                                    mad_hdr->class_version, mad_hdr->method,
+                                    mad_hdr->status, mad_hdr->class_specific,
+                                    mad_hdr->tid, mad_hdr->attr_id,
+                                    mad_hdr->resv,
+                                    mad_hdr->attr_mod);
+               }
+               for (j = 0; j < send_wr->num_sge; j++) {
+                       u8 *data = (u8 *) abs_to_virt(sge->addr);
+                       ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x "
+                                    "lkey=%x",
+                                    idx, j, data, sge->length, sge->lkey);
+                       /* assume length is n*16 */
+                       ehca_dmp(data, sge->length, "send_wr#%x sge#%x",
+                                idx, j);
+                       sge++;
+               } /* eof for j */
+               idx++;
+               send_wr = send_wr->next;
+       } /* eof while send_wr */
+}
+
+#endif /* DEBUG_GSI_SEND_WR */
+
+static inline int ehca_write_swqe(struct ehca_qp *qp,
+                                 struct ehca_wqe *wqe_p,
+                                 const struct ib_send_wr *send_wr)
+{
+       u32 idx;
+       u64 dma_length;
+       struct ehca_av *my_av;
+       u32 remote_qkey = send_wr->wr.ud.remote_qkey;
+
+       if (unlikely((send_wr->num_sge < 0) ||
+                    (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
+               ehca_gen_err("Invalid number of WQE SGE. "
+                        "num_sqe=%x max_nr_of_sg=%x",
+                        send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
+               return -EINVAL; /* invalid SG list length */
+       }
+
+       /* clear wqe header until sglist */
+       memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
+
+       wqe_p->work_request_id = send_wr->wr_id;
+
+       switch (send_wr->opcode) {
+       case IB_WR_SEND:
+       case IB_WR_SEND_WITH_IMM:
+               wqe_p->optype = WQE_OPTYPE_SEND;
+               break;
+       case IB_WR_RDMA_WRITE:
+       case IB_WR_RDMA_WRITE_WITH_IMM:
+               wqe_p->optype = WQE_OPTYPE_RDMAWRITE;
+               break;
+       case IB_WR_RDMA_READ:
+               wqe_p->optype = WQE_OPTYPE_RDMAREAD;
+               break;
+       default:
+               ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
+               return -EINVAL; /* invalid opcode */
+       }
+
+       wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE;
+
+       wqe_p->wr_flag = 0;
+
+       if (send_wr->send_flags & IB_SEND_SIGNALED)
+               wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
+
+       if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
+           send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
+               /* this might not work as long as HW does not support it */
+               wqe_p->immediate_data = be32_to_cpu(send_wr->imm_data);
+               wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
+       }
+
+       wqe_p->nr_of_data_seg = send_wr->num_sge;
+
+       switch (qp->qp_type) {
+       case IB_QPT_SMI:
+       case IB_QPT_GSI:
+               /* no break is intentional here */
+       case IB_QPT_UD:
+               /* IB 1.2 spec C10-15 compliance */
+               if (send_wr->wr.ud.remote_qkey & 0x80000000)
+                       remote_qkey = qp->qkey;
+
+               wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8;
+               wqe_p->local_ee_context_qkey = remote_qkey;
+               if (!send_wr->wr.ud.ah) {
+                       ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
+                       return -EINVAL;
+               }
+               my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah);
+               wqe_p->u.ud_av.ud_av = my_av->av;
+
+               /*
+                * omitted check of IB_SEND_INLINE
+                * since HW does not support it
+                */
+               for (idx = 0; idx < send_wr->num_sge; idx++) {
+                       wqe_p->u.ud_av.sg_list[idx].vaddr =
+                               send_wr->sg_list[idx].addr;
+                       wqe_p->u.ud_av.sg_list[idx].lkey =
+                               send_wr->sg_list[idx].lkey;
+                       wqe_p->u.ud_av.sg_list[idx].length =
+                               send_wr->sg_list[idx].length;
+               } /* eof for idx */
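+               /* SMI/GSI QPs use a fixed path MTU encoding of 1, i.e. 256 bytes */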
+               if (qp->qp_type == IB_QPT_SMI ||
+                   qp->qp_type == IB_QPT_GSI)
+                       wqe_p->u.ud_av.ud_av.pmtu = 1;
+               if (qp->qp_type == IB_QPT_GSI) {
+                       wqe_p->pkeyi = send_wr->wr.ud.pkey_index;
+#ifdef DEBUG_GSI_SEND_WR
+                       trace_send_wr_ud(send_wr);
+#endif /* DEBUG_GSI_SEND_WR */
+               }
+               break;
+
+       case IB_QPT_UC:
+               if (send_wr->send_flags & IB_SEND_FENCE)
+                       wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
+               /* no break is intentional here */
+       case IB_QPT_RC:
+               /* TODO: atomic not implemented */
+               wqe_p->u.nud.remote_virtual_adress =
+                       send_wr->wr.rdma.remote_addr;
+               wqe_p->u.nud.rkey = send_wr->wr.rdma.rkey;
+
+               /*
+                * omitted checking of IB_SEND_INLINE
+                * since HW does not support it
+                */
+               dma_length = 0;
+               for (idx = 0; idx < send_wr->num_sge; idx++) {
+                       wqe_p->u.nud.sg_list[idx].vaddr =
+                               send_wr->sg_list[idx].addr;
+                       wqe_p->u.nud.sg_list[idx].lkey =
+                               send_wr->sg_list[idx].lkey;
+                       wqe_p->u.nud.sg_list[idx].length =
+                               send_wr->sg_list[idx].length;
+                       dma_length += send_wr->sg_list[idx].length;
+               } /* eof idx */
+               wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
+
+               break;
+
+       default:
+               ehca_gen_err("Invalid qptype=%x", qp->qp_type);
+               return -EINVAL;
+       }
+
+       if (ehca_debug_level) {
+               ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
+               ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
+       }
+       return 0;
+}
+
+/* map_ib_wc_status converts raw cqe_status to ib_wc_status */
+static inline void map_ib_wc_status(u32 cqe_status,
+                                   enum ib_wc_status *wc_status)
+{
+       if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) {
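+               /* the low six bits of cqe_status carry the actual error code */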
+               switch (cqe_status & 0x3F) {
+               case 0x01:
+               case 0x21:
+                       *wc_status = IB_WC_LOC_LEN_ERR;
+                       break;
+               case 0x02:
+               case 0x22:
+                       *wc_status = IB_WC_LOC_QP_OP_ERR;
+                       break;
+               case 0x03:
+               case 0x23:
+                       *wc_status = IB_WC_LOC_EEC_OP_ERR;
+                       break;
+               case 0x04:
+               case 0x24:
+                       *wc_status = IB_WC_LOC_PROT_ERR;
+                       break;
+               case 0x05:
+               case 0x25:
+                       *wc_status = IB_WC_WR_FLUSH_ERR;
+                       break;
+               case 0x06:
+                       *wc_status = IB_WC_MW_BIND_ERR;
+                       break;
+               case 0x07: /* remote error - look into bits 20:24 */
+                       switch ((cqe_status
+                                & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
+                       case 0x0:
+                               /*
+                                * PSN sequence error; there is no
+                                * matching ib_wc_status
+                                */
+                               *wc_status = IB_WC_GENERAL_ERR;
+                               break;
+                       case 0x1:
+                               *wc_status = IB_WC_REM_INV_REQ_ERR;
+                               break;
+                       case 0x2:
+                               *wc_status = IB_WC_REM_ACCESS_ERR;
+                               break;
+                       case 0x3:
+                               *wc_status = IB_WC_REM_OP_ERR;
+                               break;
+                       case 0x4:
+                               *wc_status = IB_WC_REM_INV_RD_REQ_ERR;
+                               break;
+                       }
+                       break;
+               case 0x08:
+                       *wc_status = IB_WC_RETRY_EXC_ERR;
+                       break;
+               case 0x09:
+                       *wc_status = IB_WC_RNR_RETRY_EXC_ERR;
+                       break;
+               case 0x0A:
+               case 0x2D:
+                       *wc_status = IB_WC_REM_ABORT_ERR;
+                       break;
+               case 0x0B:
+               case 0x2E:
+                       *wc_status = IB_WC_INV_EECN_ERR;
+                       break;
+               case 0x0C:
+               case 0x2F:
+                       *wc_status = IB_WC_INV_EEC_STATE_ERR;
+                       break;
+               case 0x0D:
+                       *wc_status = IB_WC_BAD_RESP_ERR;
+                       break;
+               case 0x10:
+                       /* WQE purged */
+                       *wc_status = IB_WC_WR_FLUSH_ERR;
+                       break;
+               default:
+                       *wc_status = IB_WC_FATAL_ERR;
+
+               }
+       } else
+               *wc_status = IB_WC_SUCCESS;
+}
+
+int ehca_post_send(struct ib_qp *qp,
+                  struct ib_send_wr *send_wr,
+                  struct ib_send_wr **bad_send_wr)
+{
+       struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
+       struct ib_send_wr *cur_send_wr;
+       struct ehca_wqe *wqe_p;
+       int wqe_cnt = 0;
+       int ret = 0;
+       unsigned long spl_flags;
+
+       /* LOCK the QUEUE */
+       spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
+
+       /* loop processes list of send reqs */
+       for (cur_send_wr = send_wr; cur_send_wr != NULL;
+            cur_send_wr = cur_send_wr->next) {
+               u64 start_offset = my_qp->ipz_squeue.current_q_offset;
+               /* get pointer next to free WQE */
+               wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
+               if (unlikely(!wqe_p)) {
+                       /* too many posted work requests: queue overflow */
+                       if (bad_send_wr)
+                               *bad_send_wr = cur_send_wr;
+                       if (wqe_cnt == 0) {
+                               ret = -ENOMEM;
+                               ehca_err(qp->device, "Too many posted WQEs "
+                                        "qp_num=%x", qp->qp_num);
+                       }
+                       goto post_send_exit0;
+               }
+               /* write a SEND WQE into the QUEUE */
+               ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr);
+               /*
+                * if something failed,
+                * reset the free entry pointer to the start value
+                */
+               if (unlikely(ret)) {
+                       my_qp->ipz_squeue.current_q_offset = start_offset;
+                       if (bad_send_wr)
+                               *bad_send_wr = cur_send_wr;
+                       if (wqe_cnt == 0) {
+                               ret = -EINVAL;
+                               ehca_err(qp->device, "Could not write WQE "
+                                        "qp_num=%x", qp->qp_num);
+                       }
+                       goto post_send_exit0;
+               }
+               wqe_cnt++;
+               ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
+                        my_qp, qp->qp_num, wqe_cnt);
+       } /* eof for cur_send_wr */
+
+post_send_exit0:
+       /* UNLOCK the QUEUE */
+       spin_unlock_irqrestore(&my_qp->spinlock_s, spl_flags);
+       iosync(); /* serialize GAL register access */
+       hipz_update_sqa(my_qp, wqe_cnt);
+       return ret;
+}
+
+int ehca_post_recv(struct ib_qp *qp,
+                  struct ib_recv_wr *recv_wr,
+                  struct ib_recv_wr **bad_recv_wr)
+{
+       struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
+       struct ib_recv_wr *cur_recv_wr;
+       struct ehca_wqe *wqe_p;
+       int wqe_cnt = 0;
+       int ret = 0;
+       unsigned long spl_flags;
+
+       /* LOCK the QUEUE */
+       spin_lock_irqsave(&my_qp->spinlock_r, spl_flags);
+
+       /* loop processes list of recv reqs */
+       for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
+            cur_recv_wr = cur_recv_wr->next) {
+               u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
+               /* get pointer next to free WQE */
+               wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
+               if (unlikely(!wqe_p)) {
+                       /* too many posted work requests: queue overflow */
+                       if (bad_recv_wr)
+                               *bad_recv_wr = cur_recv_wr;
+                       if (wqe_cnt == 0) {
+                               ret = -ENOMEM;
+                               ehca_err(qp->device, "Too many posted WQEs "
+                                        "qp_num=%x", qp->qp_num);
+                       }
+                       goto post_recv_exit0;
+               }
+               /* write a RECV WQE into the QUEUE */
+               ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr);
+               /*
+                * if something failed,
+                * reset the free entry pointer to the start value
+                */
+               if (unlikely(ret)) {
+                       my_qp->ipz_rqueue.current_q_offset = start_offset;
+                       if (bad_recv_wr)
+                               *bad_recv_wr = cur_recv_wr;
+                       if (wqe_cnt == 0) {
+                               ret = -EINVAL;
+                               ehca_err(qp->device, "Could not write WQE "
+                                        "qp_num=%x", qp->qp_num);
+                       }
+                       goto post_recv_exit0;
+               }
+               wqe_cnt++;
+               ehca_gen_dbg("ehca_qp=%p qp_num=%x wqe_cnt=%d",
+                    my_qp, qp->qp_num, wqe_cnt);
+       } /* eof for cur_recv_wr */
+
+post_recv_exit0:
+       spin_unlock_irqrestore(&my_qp->spinlock_r, spl_flags);
+       iosync(); /* serialize GAL register access */
+       hipz_update_rqa(my_qp, wqe_cnt);
+       return ret;
+}
+
+/*
+ * The ib_wc_opcode table converts an ehca wc opcode into an ib one.
+ * Since zero indicates an invalid opcode, the table stores each ib opcode
+ * plus one, and the actual ib opcode must be decremented after the lookup.
+ */
+static const u8 ib_wc_opcode[256] = {
+       [0x01] = IB_WC_RECV+1,
+       [0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
+       [0x04] = IB_WC_BIND_MW+1,
+       [0x08] = IB_WC_FETCH_ADD+1,
+       [0x10] = IB_WC_COMP_SWAP+1,
+       [0x20] = IB_WC_RDMA_WRITE+1,
+       [0x40] = IB_WC_RDMA_READ+1,
+       [0x80] = IB_WC_SEND+1
+};
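+
+/*
+ * example: a cqe optype of 0x80 maps to IB_WC_SEND, since
+ * ib_wc_opcode[0x80] == IB_WC_SEND + 1 and the caller subtracts one;
+ * optypes missing from the table yield 0 and are treated as invalid
+ */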
+
+/* internal function to poll one entry of cq */
+static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
+{
+       int ret = 0;
+       struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
+       struct ehca_cqe *cqe;
+       int cqe_count = 0;
+
+poll_cq_one_read_cqe:
+       cqe = (struct ehca_cqe *)
+               ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
+       if (!cqe) {
+               ret = -EAGAIN;
+               ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p "
+                        "cq_num=%x ret=%x", my_cq, my_cq->cq_number, ret);
+               goto  poll_cq_one_exit0;
+       }
+
+       /* prevents loads being reordered across this point */
+       rmb();
+
+       cqe_count++;
+       if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
+               struct ehca_qp *qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
+               int purgeflag;
+               unsigned long spl_flags;
+               if (!qp) {
+                       ehca_err(cq->device, "cq_num=%x qp_num=%x "
+                                "could not find qp -> ignore cqe",
+                                my_cq->cq_number, cqe->local_qp_number);
+                       ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
+                                my_cq->cq_number, cqe->local_qp_number);
+                       /* ignore this purged cqe */
+                       goto poll_cq_one_read_cqe;
+               }
+               spin_lock_irqsave(&qp->spinlock_s, spl_flags);
+               purgeflag = qp->sqerr_purgeflag;
+               spin_unlock_irqrestore(&qp->spinlock_s, spl_flags);
+
+               if (purgeflag) {
+                       ehca_dbg(cq->device, "Got CQE with purged bit qp_num=%x "
+                                "src_qp=%x",
+                                cqe->local_qp_number, cqe->remote_qp_number);
+                       if (ehca_debug_level)
+                               ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
+                                        cqe->local_qp_number,
+                                        cqe->remote_qp_number);
+                       /*
+                        * ignore this cqe to avoid duplicate cqes for the
+                        * bad wqe that caused the sqe state, and turn off
+                        * the purge flag
+                        */
+                       qp->sqerr_purgeflag = 0;
+                       goto poll_cq_one_read_cqe;
+               }
+       }
+
+       /* tracing cqe */
+       if (ehca_debug_level) {
+               ehca_dbg(cq->device,
+                        "Received COMPLETION ehca_cq=%p cq_num=%x -----",
+                        my_cq, my_cq->cq_number);
+               ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
+                        my_cq, my_cq->cq_number);
+               ehca_dbg(cq->device,
+                        "ehca_cq=%p cq_num=%x -------------------------",
+                        my_cq, my_cq->cq_number);
+       }
+
+       /* we got a completion! */
+       wc->wr_id = cqe->work_request_id;
+
+       /* eval ib_wc_opcode */
+       wc->opcode = ib_wc_opcode[cqe->optype]-1;
+       if (unlikely(wc->opcode == -1)) {
+               ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
+                        "ehca_cq=%p cq_num=%x",
+                        cqe->optype, cqe->status, my_cq, my_cq->cq_number);
+               /* dump cqe for other infos */
+               ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
+                        my_cq, my_cq->cq_number);
+               /* also update the queue adder to throw away this entry */
+               goto poll_cq_one_exit0;
+       }
+       /* eval ib_wc_status */
+       if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) {
+               /* complete with errors */
+               map_ib_wc_status(cqe->status, &wc->status);
+               wc->vendor_err = wc->status;
+       } else
+               wc->status = IB_WC_SUCCESS;
+
+       wc->qp_num = cqe->local_qp_number;
+       wc->byte_len = cqe->nr_bytes_transferred;
+       wc->pkey_index = cqe->pkey_index;
+       wc->slid = cqe->rlid;
+       wc->dlid_path_bits = cqe->dlid;
+       wc->src_qp = cqe->remote_qp_number;
+       wc->wc_flags = cqe->w_completion_flags;
+       wc->imm_data = cpu_to_be32(cqe->immediate_data);
+       wc->sl = cqe->service_level;
+
+       if (wc->status != IB_WC_SUCCESS)
+               ehca_dbg(cq->device,
+                        "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
+                        "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
+                        "cqe=%p", my_cq, my_cq->cq_number, cqe->optype,
+                        cqe->status, cqe->local_qp_number,
+                        cqe->remote_qp_number, cqe->work_request_id, cqe);
+
+poll_cq_one_exit0:
+       if (cqe_count > 0)
+               hipz_update_feca(my_cq, cqe_count);
+
+       return ret;
+}
+
+int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
+{
+       struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
+       int nr;
+       struct ib_wc *current_wc = wc;
+       int ret = 0;
+       unsigned long spl_flags;
+
+       if (num_entries < 1) {
+               ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
+                        "cq_num=%x", num_entries, my_cq, my_cq->cq_number);
+               ret = -EINVAL;
+               goto poll_cq_exit0;
+       }
+
+       spin_lock_irqsave(&my_cq->spinlock, spl_flags);
+       for (nr = 0; nr < num_entries; nr++) {
+               ret = ehca_poll_cq_one(cq, current_wc);
+               if (ret)
+                       break;
+               current_wc++;
+       } /* eof for nr */
+       spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
+       if (ret == -EAGAIN || !ret)
+               ret = nr;
+
+poll_cq_exit0:
+       return ret;
+}
+
+int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify)
+{
+       struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
+
+       switch (cq_notify) {
+       case IB_CQ_SOLICITED:
+               hipz_set_cqx_n0(my_cq, 1);
+               break;
+       case IB_CQ_NEXT_COMP:
+               hipz_set_cqx_n1(my_cq, 1);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
new file mode 100644 (file)
index 0000000..9f16e9c
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ *  IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ *  SQP functions
+ *
+ *  Authors: Khadija Souissi <souissi@de.ibm.com>
+ *           Heiko J Schick <schickhj@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include "ehca_classes.h"
+#include "ehca_tools.h"
+#include "ehca_qes.h"
+#include "ehca_iverbs.h"
+#include "hcp_if.h"
+
+
+/**
+ * ehca_define_sqp - Defines special queue pair 1 (GSI QP). Once the special
+ * queue pair has been created successfully, the corresponding port becomes
+ * active.
+ *
+ * Defining special queue pair 0 (SMI QP) is not supported yet.
+ *
+ * @qp_init_attr: queue pair init attributes with port and queue pair type
+ */
+
+u64 ehca_define_sqp(struct ehca_shca *shca,
+                   struct ehca_qp *ehca_qp,
+                   struct ib_qp_init_attr *qp_init_attr)
+{
+       u32 pma_qp_nr, bma_qp_nr;
+       u64 ret;
+       u8 port = qp_init_attr->port_num;
+       int counter;
+
+       shca->sport[port - 1].port_state = IB_PORT_DOWN;
+
+       switch (qp_init_attr->qp_type) {
+       case IB_QPT_SMI:
+               /* function not supported yet */
+               break;
+       case IB_QPT_GSI:
+               ret = hipz_h_define_aqp1(shca->ipz_hca_handle,
+                                        ehca_qp->ipz_qp_handle,
+                                        ehca_qp->galpas.kernel,
+                                        (u32) qp_init_attr->port_num,
+                                        &pma_qp_nr, &bma_qp_nr);
+
+               if (ret != H_SUCCESS) {
+                       ehca_err(&shca->ib_device,
+                                "Can't define AQP1 for port %x. rc=%lx",
+                                port, ret);
+                       return ret;
+               }
+               break;
+       default:
+               ehca_err(&shca->ib_device, "invalid qp_type=%x",
+                        qp_init_attr->qp_type);
+               return H_PARAMETER;
+       }
+
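+       /* wait up to ehca_port_act_time seconds for the port to become active */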
+       for (counter = 0;
+            shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
+                    counter < ehca_port_act_time;
+            counter++) {
+               ehca_dbg(&shca->ib_device, "... wait until port %x is active",
+                        port);
+               msleep_interruptible(1000);
+       }
+
+       if (counter == ehca_port_act_time) {
+               ehca_err(&shca->ib_device, "Port %x is not active.", port);
+               return H_HARDWARE;
+       }
+
+       return H_SUCCESS;
+}
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
new file mode 100644 (file)
index 0000000..9f56bb8
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ *  IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ *  auxiliary functions
+ *
+ *  Authors: Christoph Raisch <raisch@de.ibm.com>
+ *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *           Khadija Souissi <souissik@de.ibm.com>
+ *           Waleri Fomin <fomin@de.ibm.com>
+ *           Heiko J Schick <schickhj@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef EHCA_TOOLS_H
+#define EHCA_TOOLS_H
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+
+#include <asm/abs_addr.h>
+#include <asm/ibmebus.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+extern int ehca_debug_level;
+
+#define ehca_dbg(ib_dev, format, arg...) \
+       do { \
+               if (unlikely(ehca_debug_level)) \
+                       dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \
+                                  "PU%04x EHCA_DBG:%s " format "\n", \
+                                  get_paca()->paca_index, __FUNCTION__, \
+                                  ## arg); \
+       } while (0)
+
+#define ehca_info(ib_dev, format, arg...) \
+       dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \
+                get_paca()->paca_index, __FUNCTION__, ## arg)
+
+#define ehca_warn(ib_dev, format, arg...) \
+       dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \
+                get_paca()->paca_index, __FUNCTION__, ## arg)
+
+#define ehca_err(ib_dev, format, arg...) \
+       dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \
+               get_paca()->paca_index, __FUNCTION__, ## arg)
+
+/* use this one only if no ib_dev available */
+#define ehca_gen_dbg(format, arg...) \
+       do { \
+               if (unlikely(ehca_debug_level)) \
+                       printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n",\
+                              get_paca()->paca_index, __FUNCTION__, ## arg); \
+       } while (0)
+
+#define ehca_gen_warn(format, arg...) \
+       do { \
+               if (unlikely(ehca_debug_level)) \
+                       printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n",\
+                              get_paca()->paca_index, __FUNCTION__, ## arg); \
+       } while (0)
+
+#define ehca_gen_err(format, arg...) \
+       printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \
+               get_paca()->paca_index, __FUNCTION__, ## arg)
+
+/**
+ * ehca_dmp - printk a memory block in chunks of 16 bytes.
+ * Each line has the following layout:
+ * <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex>
+ */
+#define ehca_dmp(adr, len, format, args...) \
+       do {                                   \
+               unsigned int x;                       \
+               unsigned int l = (unsigned int)(len); \
+               unsigned char *deb = (unsigned char *)(adr);    \
+               for (x = 0; x < l; x += 16) { \
+                       printk("EHCA_DMP:%s" format \
+                              " adr=%p ofs=%04x %016lx %016lx\n", \
+                              __FUNCTION__, ##args, deb, x, \
+                              *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
+                       deb += 16; \
+               } \
+       } while (0)
+
+/* define a bitmask, little endian version */
+#define EHCA_BMASK(pos, length) (((pos) << 16) + (length))
+
+/* define a bitmask, the IBM way (bit 0 is the most significant bit) */
+#define EHCA_BMASK_IBM(from, to) (((63 - (to)) << 16) + ((to) - (from) + 1))
+
+/* internal helper, don't use directly */
+#define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
+
+/* internal helper, don't use directly */
+#define EHCA_BMASK_MASK(mask) (0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))
+
+/**
+ * EHCA_BMASK_SET - return value shifted and masked by mask
+ * variable |= EHCA_BMASK_SET(MY_MASK, 0x4711) ORs the bits into variable;
+ * variable &= ~EHCA_BMASK_SET(MY_MASK, -1) clears the bits covered by the
+ * mask in variable
+ */
+#define EHCA_BMASK_SET(mask, value) \
+       ((EHCA_BMASK_MASK(mask) & ((u64)(value))) << EHCA_BMASK_SHIFTPOS(mask))
+
+/**
+ * EHCA_BMASK_GET - extract a parameter from value by mask
+ */
+#define EHCA_BMASK_GET(mask, value) \
+       (EHCA_BMASK_MASK(mask) & (((u64)(value)) >> EHCA_BMASK_SHIFTPOS(mask)))
+
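+/*
+ * Illustrative example: EHCA_BMASK_IBM(0, 15) describes IBM bit positions
+ * 0..15, i.e. the 16 most significant bits of a u64 (shift position 48,
+ * mask length 16). EHCA_BMASK_SET(EHCA_BMASK_IBM(0, 15), 0x4711) therefore
+ * yields 0x4711000000000000ULL, and EHCA_BMASK_GET on that value returns
+ * 0x4711 again.
+ */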
+
+/* convert an eHCA return code into the corresponding IB/errno code */
+static inline int ehca2ib_return_code(u64 ehca_rc)
+{
+       switch (ehca_rc) {
+       case H_SUCCESS:
+               return 0;
+       case H_BUSY:
+               return -EBUSY;
+       case H_NO_MEM:
+               return -ENOMEM;
+       default:
+               return -EINVAL;
+       }
+}
+
+
+#endif /* EHCA_TOOLS_H */
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
new file mode 100644 (file)
index 0000000..e08764e
--- /dev/null
@@ -0,0 +1,392 @@
+/*
+ *  IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ *  userspace support verbs
+ *
+ *  Authors: Christoph Raisch <raisch@de.ibm.com>
+ *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *           Heiko J Schick <schickhj@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/current.h>
+
+#include "ehca_classes.h"
+#include "ehca_iverbs.h"
+#include "ehca_mrmw.h"
+#include "ehca_tools.h"
+#include "hcp_if.h"
+
+struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
+                                       struct ib_udata *udata)
+{
+       struct ehca_ucontext *my_context;
+
+       my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
+       if (!my_context) {
+               ehca_err(device, "Out of memory device=%p", device);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       return &my_context->ib_ucontext;
+}
+
+int ehca_dealloc_ucontext(struct ib_ucontext *context)
+{
+       kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
+       return 0;
+}
+
+struct page *ehca_nopage(struct vm_area_struct *vma,
+                        unsigned long address, int *type)
+{
+       struct page *mypage = NULL;
+       u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
+       u32 idr_handle = fileoffset >> 32;
+       u32 q_type = (fileoffset >> 28) & 0xF;    /* CQ, QP,...        */
+       u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
+       u32 cur_pid = current->tgid;
+       unsigned long flags;
+       struct ehca_cq *cq;
+       struct ehca_qp *qp;
+       struct ehca_pd *pd;
+       u64 offset;
+       void *vaddr;
+
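+       /*
+        * The mmap offset encodes which resource is being faulted in:
+        * bits 63..32 hold the idr handle, bits 31..28 the queue type
+        * (1=CQ, 2=QP) and bits 27..24 the resource type within it.
+        */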
+       switch (q_type) {
+       case 1: /* CQ */
+               spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+               cq = idr_find(&ehca_cq_idr, idr_handle);
+               spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+               /* make sure this mmap really belongs to the authorized user */
+               if (!cq) {
+                       ehca_gen_err("cq is NULL ret=NOPAGE_SIGBUS");
+                       return NOPAGE_SIGBUS;
+               }
+
+               if (cq->ownpid != cur_pid) {
+                       ehca_err(cq->ib_cq.device,
+                                "Invalid caller pid=%x ownpid=%x",
+                                cur_pid, cq->ownpid);
+                       return NOPAGE_SIGBUS;
+               }
+
+               if (rsrc_type == 2) {
+                       ehca_dbg(cq->ib_cq.device, "cq=%p cq queuearea", cq);
+                       offset = address - vma->vm_start;
+                       vaddr = ipz_qeit_calc(&cq->ipz_queue, offset);
+                       ehca_dbg(cq->ib_cq.device, "offset=%lx vaddr=%p",
+                                offset, vaddr);
+                       mypage = virt_to_page(vaddr);
+               }
+               break;
+
+       case 2: /* QP */
+               spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+               qp = idr_find(&ehca_qp_idr, idr_handle);
+               spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+
+               /* make sure this mmap really belongs to the authorized user */
+               if (!qp) {
+                       ehca_gen_err("qp is NULL ret=NOPAGE_SIGBUS");
+                       return NOPAGE_SIGBUS;
+               }
+
+               pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
+               if (pd->ownpid != cur_pid) {
+                       ehca_err(qp->ib_qp.device,
+                                "Invalid caller pid=%x ownpid=%x",
+                                cur_pid, pd->ownpid);
+                       return NOPAGE_SIGBUS;
+               }
+
+               if (rsrc_type == 2) {   /* rqueue */
+                       ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueuearea", qp);
+                       offset = address - vma->vm_start;
+                       vaddr = ipz_qeit_calc(&qp->ipz_rqueue, offset);
+                       ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
+                                offset, vaddr);
+                       mypage = virt_to_page(vaddr);
+               } else if (rsrc_type == 3) {    /* squeue */
+                       ehca_dbg(qp->ib_qp.device, "qp=%p qp squeuearea", qp);
+                       offset = address - vma->vm_start;
+                       vaddr = ipz_qeit_calc(&qp->ipz_squeue, offset);
+                       ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
+                                offset, vaddr);
+                       mypage = virt_to_page(vaddr);
+               }
+               break;
+
+       default:
+               ehca_gen_err("bad queue type %x", q_type);
+               return NOPAGE_SIGBUS;
+       }
+
+       if (!mypage) {
+               ehca_gen_err("Invalid page adr==NULL ret=NOPAGE_SIGBUS");
+               return NOPAGE_SIGBUS;
+       }
+       get_page(mypage);
+
+       return mypage;
+}
+
+static struct vm_operations_struct ehcau_vm_ops = {
+       .nopage = ehca_nopage,
+};
+
+int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+       u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
+       u32 idr_handle = fileoffset >> 32;
+       u32 q_type = (fileoffset >> 28) & 0xF;    /* CQ, QP,...        */
+       u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
+       u32 cur_pid = current->tgid;
+       u32 ret;
+       u64 vsize, physical;
+       unsigned long flags;
+       struct ehca_cq *cq;
+       struct ehca_qp *qp;
+       struct ehca_pd *pd;
+
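+       /*
+        * Same offset encoding as in ehca_nopage(): idr handle in bits 63..32,
+        * queue type in bits 31..28 (1=CQ, 2=QP) and resource type in bits
+        * 27..24 (1=galpa fw handle, 2=rqueue, 3=squeue).
+        */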
+       switch (q_type) {
+       case  1: /* CQ */
+               spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+               cq = idr_find(&ehca_cq_idr, idr_handle);
+               spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+               /* make sure this mmap really belongs to the authorized user */
+               if (!cq)
+                       return -EINVAL;
+
+               if (cq->ownpid != cur_pid) {
+                       ehca_err(cq->ib_cq.device,
+                                "Invalid caller pid=%x ownpid=%x",
+                                cur_pid, cq->ownpid);
+                       return -ENOMEM;
+               }
+
+               if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
+                       return -EINVAL;
+
+               switch (rsrc_type) {
+               case 1: /* galpa fw handle */
+                       ehca_dbg(cq->ib_cq.device, "cq=%p cq triggerarea", cq);
+                       vma->vm_flags |= VM_RESERVED;
+                       vsize = vma->vm_end - vma->vm_start;
+                       if (vsize != EHCA_PAGESIZE) {
+                               ehca_err(cq->ib_cq.device, "invalid vsize=%lx",
+                                        vma->vm_end - vma->vm_start);
+                               return -EINVAL;
+                       }
+
+                       physical = cq->galpas.user.fw_handle;
+                       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+                       vma->vm_flags |= VM_IO | VM_RESERVED;
+
+                       ehca_dbg(cq->ib_cq.device,
+                                "vsize=%lx physical=%lx", vsize, physical);
+                       ret = remap_pfn_range(vma, vma->vm_start,
+                                             physical >> PAGE_SHIFT, vsize,
+                                             vma->vm_page_prot);
+                       if (ret) {
+                               ehca_err(cq->ib_cq.device,
+                                        "remap_pfn_range() failed ret=%x",
+                                        ret);
+                               return -ENOMEM;
+                       }
+                       break;
+
+               case 2: /* cq queue_addr */
+                       ehca_dbg(cq->ib_cq.device, "cq=%p cq q_addr", cq);
+                       vma->vm_flags |= VM_RESERVED;
+                       vma->vm_ops = &ehcau_vm_ops;
+                       break;
+
+               default:
+                       ehca_err(cq->ib_cq.device, "bad resource type %x",
+                                rsrc_type);
+                       return -EINVAL;
+               }
+               break;
+
+       case 2: /* QP */
+               spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+               qp = idr_find(&ehca_qp_idr, idr_handle);
+               spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+
+               /* make sure this mmap really belongs to the authorized user */
+               if (!qp)
+                       return -EINVAL;
+
+               pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
+               if (pd->ownpid != cur_pid) {
+                       ehca_err(qp->ib_qp.device,
+                                "Invalid caller pid=%x ownpid=%x",
+                                cur_pid, pd->ownpid);
+                       return -ENOMEM;
+               }
+
+               if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context)
+                       return -EINVAL;
+
+               switch (rsrc_type) {
+               case 1: /* galpa fw handle */
+                       ehca_dbg(qp->ib_qp.device, "qp=%p qp triggerarea", qp);
+                       vma->vm_flags |= VM_RESERVED;
+                       vsize = vma->vm_end - vma->vm_start;
+                       if (vsize != EHCA_PAGESIZE) {
+                               ehca_err(qp->ib_qp.device, "invalid vsize=%lx",
+                                        vma->vm_end - vma->vm_start);
+                               return -EINVAL;
+                       }
+
+                       physical = qp->galpas.user.fw_handle;
+                       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+                       vma->vm_flags |= VM_IO | VM_RESERVED;
+
+                       ehca_dbg(qp->ib_qp.device, "vsize=%lx physical=%lx",
+                                vsize, physical);
+                       ret = remap_pfn_range(vma, vma->vm_start,
+                                             physical >> PAGE_SHIFT, vsize,
+                                             vma->vm_page_prot);
+                       if (ret) {
+                               ehca_err(qp->ib_qp.device,
+                                        "remap_pfn_range() failed ret=%x",
+                                        ret);
+                               return -ENOMEM;
+                       }
+                       break;
+
+               case 2: /* qp rqueue_addr */
+                       ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueue_addr", qp);
+                       vma->vm_flags |= VM_RESERVED;
+                       vma->vm_ops = &ehcau_vm_ops;
+                       break;
+
+               case 3: /* qp squeue_addr */
+                       ehca_dbg(qp->ib_qp.device, "qp=%p qp squeue_addr", qp);
+                       vma->vm_flags |= VM_RESERVED;
+                       vma->vm_ops = &ehcau_vm_ops;
+                       break;
+
+               default:
+                       ehca_err(qp->ib_qp.device, "bad resource type %x",
+                                rsrc_type);
+                       return -EINVAL;
+               }
+               break;
+
+       default:
+               ehca_gen_err("bad queue type %x", q_type);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int ehca_mmap_nopage(u64 foffset, u64 length, void **mapped,
+                    struct vm_area_struct **vma)
+{
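+       /* map an anonymous shared region; ehcau_vm_ops supplies the pages */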
+       down_write(&current->mm->mmap_sem);
+       *mapped = (void *)do_mmap(NULL, 0, length, PROT_WRITE,
+                                 MAP_SHARED | MAP_ANONYMOUS,
+                                 foffset);
+       up_write(&current->mm->mmap_sem);
+       if (!(*mapped)) {
+               ehca_gen_err("couldn't mmap foffset=%lx length=%lx",
+                            foffset, length);
+               return -EINVAL;
+       }
+
+       *vma = find_vma(current->mm, (u64)*mapped);
+       if (!(*vma)) {
+               down_write(&current->mm->mmap_sem);
+               do_munmap(current->mm, (unsigned long)*mapped, length);
+               up_write(&current->mm->mmap_sem);
+               ehca_gen_err("couldn't find vma queue=%p", *mapped);
+               return -EINVAL;
+       }
+       (*vma)->vm_flags |= VM_RESERVED;
+       (*vma)->vm_ops = &ehcau_vm_ops;
+
+       return 0;
+}
+
+int ehca_mmap_register(u64 physical, void **mapped,
+                      struct vm_area_struct **vma)
+{
+       int ret;
+       unsigned long vsize;
+       /* eHCA hardware supports only 4k pages */
+       ret = ehca_mmap_nopage(0, EHCA_PAGESIZE, mapped, vma);
+       if (ret) {
+               ehca_gen_err("could'nt mmap physical=%lx", physical);
+               return ret;
+       }
+
+       (*vma)->vm_flags |= VM_RESERVED;
+       vsize = (*vma)->vm_end - (*vma)->vm_start;
+       if (vsize != EHCA_PAGESIZE) {
+               ehca_gen_err("invalid vsize=%lx",
+                            (*vma)->vm_end - (*vma)->vm_start);
+               return -EINVAL;
+       }
+
+       (*vma)->vm_page_prot = pgprot_noncached((*vma)->vm_page_prot);
+       (*vma)->vm_flags |= VM_IO | VM_RESERVED;
+
+       ret = remap_pfn_range((*vma), (*vma)->vm_start,
+                             physical >> PAGE_SHIFT, vsize,
+                             (*vma)->vm_page_prot);
+       if (ret) {
+               ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+int ehca_munmap(unsigned long addr, size_t len)
+{
+       int ret = 0;
+       struct mm_struct *mm = current->mm;
+       if (mm) {
+               down_write(&mm->mmap_sem);
+               ret = do_munmap(mm, addr, len);
+               up_write(&mm->mmap_sem);
+       }
+       return ret;
+}
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
new file mode 100644 (file)
index 0000000..3fb46e6
--- /dev/null
@@ -0,0 +1,874 @@
+/*
+ *  IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ *  Firmware Infiniband Interface code for POWER
+ *
+ *  Authors: Christoph Raisch <raisch@de.ibm.com>
+ *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *           Gerd Bayer <gerd.bayer@de.ibm.com>
+ *           Waleri Fomin <fomin@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/hvcall.h>
+#include "ehca_tools.h"
+#include "hcp_if.h"
+#include "hcp_phyp.h"
+#include "hipz_fns.h"
+#include "ipz_pt_fn.h"
+
+#define H_ALL_RES_QP_ENHANCED_OPS       EHCA_BMASK_IBM(9, 11)
+#define H_ALL_RES_QP_PTE_PIN            EHCA_BMASK_IBM(12, 12)
+#define H_ALL_RES_QP_SERVICE_TYPE       EHCA_BMASK_IBM(13, 15)
+#define H_ALL_RES_QP_LL_RQ_CQE_POSTING  EHCA_BMASK_IBM(18, 18)
+#define H_ALL_RES_QP_LL_SQ_CQE_POSTING  EHCA_BMASK_IBM(19, 21)
+#define H_ALL_RES_QP_SIGNALING_TYPE     EHCA_BMASK_IBM(22, 23)
+#define H_ALL_RES_QP_UD_AV_LKEY_CTRL    EHCA_BMASK_IBM(31, 31)
+#define H_ALL_RES_QP_RESOURCE_TYPE      EHCA_BMASK_IBM(56, 63)
+
+#define H_ALL_RES_QP_MAX_OUTST_SEND_WR  EHCA_BMASK_IBM(0, 15)
+#define H_ALL_RES_QP_MAX_OUTST_RECV_WR  EHCA_BMASK_IBM(16, 31)
+#define H_ALL_RES_QP_MAX_SEND_SGE       EHCA_BMASK_IBM(32, 39)
+#define H_ALL_RES_QP_MAX_RECV_SGE       EHCA_BMASK_IBM(40, 47)
+
+#define H_ALL_RES_QP_ACT_OUTST_SEND_WR  EHCA_BMASK_IBM(16, 31)
+#define H_ALL_RES_QP_ACT_OUTST_RECV_WR  EHCA_BMASK_IBM(48, 63)
+#define H_ALL_RES_QP_ACT_SEND_SGE       EHCA_BMASK_IBM(8, 15)
+#define H_ALL_RES_QP_ACT_RECV_SGE       EHCA_BMASK_IBM(24, 31)
+
+#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(32, 63)
+
+/* direct access qp controls */
+#define DAQP_CTRL_ENABLE    0x01
+#define DAQP_CTRL_SEND_COMP 0x20
+#define DAQP_CTRL_RECV_COMP 0x40
+
+static u32 get_longbusy_msecs(int longbusy_rc)
+{
+       switch (longbusy_rc) {
+       case H_LONG_BUSY_ORDER_1_MSEC:
+               return 1;
+       case H_LONG_BUSY_ORDER_10_MSEC:
+               return 10;
+       case H_LONG_BUSY_ORDER_100_MSEC:
+               return 100;
+       case H_LONG_BUSY_ORDER_1_SEC:
+               return 1000;
+       case H_LONG_BUSY_ORDER_10_SEC:
+               return 10000;
+       case H_LONG_BUSY_ORDER_100_SEC:
+               return 100000;
+       default:
+               return 1;
+       }
+}
+
+static long ehca_plpar_hcall_norets(unsigned long opcode,
+                                   unsigned long arg1,
+                                   unsigned long arg2,
+                                   unsigned long arg3,
+                                   unsigned long arg4,
+                                   unsigned long arg5,
+                                   unsigned long arg6,
+                                   unsigned long arg7)
+{
+       long ret;
+       int i, sleep_msecs;
+
+       ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
+                    "arg5=%lx arg6=%lx arg7=%lx",
+                    opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+
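+       /* retry up to 5 times while the hypervisor reports long-busy codes */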
+       for (i = 0; i < 5; i++) {
+               ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
+                                        arg5, arg6, arg7);
+
+               if (H_IS_LONG_BUSY(ret)) {
+                       sleep_msecs = get_longbusy_msecs(ret);
+                       msleep_interruptible(sleep_msecs);
+                       continue;
+               }
+
+               if (ret < H_SUCCESS)
+                       ehca_gen_err("opcode=%lx ret=%lx"
+                                    " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+                                    " arg5=%lx arg6=%lx arg7=%lx ",
+                                    opcode, ret,
+                                    arg1, arg2, arg3, arg4, arg5,
+                                    arg6, arg7);
+
+               ehca_gen_dbg("opcode=%lx ret=%lx", opcode, ret);
+               return ret;
+       }
+
+       return H_BUSY;
+}
+
+static long ehca_plpar_hcall9(unsigned long opcode,
+                             unsigned long *outs, /* array of 9 outputs */
+                             unsigned long arg1,
+                             unsigned long arg2,
+                             unsigned long arg3,
+                             unsigned long arg4,
+                             unsigned long arg5,
+                             unsigned long arg6,
+                             unsigned long arg7,
+                             unsigned long arg8,
+                             unsigned long arg9)
+{
+       long ret;
+       int i, sleep_msecs;
+
+       ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
+                    "arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx",
+                    opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7,
+                    arg8, arg9);
+
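+       /* same long-busy retry policy as in ehca_plpar_hcall_norets() above */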
+       for (i = 0; i < 5; i++) {
+               ret = plpar_hcall9(opcode, outs,
+                                  arg1, arg2, arg3, arg4, arg5,
+                                  arg6, arg7, arg8, arg9);
+
+               if (H_IS_LONG_BUSY(ret)) {
+                       sleep_msecs = get_longbusy_msecs(ret);
+                       msleep_interruptible(sleep_msecs);
+                       continue;
+               }
+
+               if (ret < H_SUCCESS)
+                       ehca_gen_err("opcode=%lx ret=%lx"
+                                    " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+                                    " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
+                                    " arg9=%lx"
+                                    " out1=%lx out2=%lx out3=%lx out4=%lx"
+                                    " out5=%lx out6=%lx out7=%lx out8=%lx"
+                                    " out9=%lx",
+                                    opcode, ret,
+                                    arg1, arg2, arg3, arg4, arg5,
+                                    arg6, arg7, arg8, arg9,
+                                    outs[0], outs[1], outs[2], outs[3],
+                                    outs[4], outs[5], outs[6], outs[7],
+                                    outs[8]);
+
+               ehca_gen_dbg("opcode=%lx ret=%lx out1=%lx out2=%lx out3=%lx "
+                            "out4=%lx out5=%lx out6=%lx out7=%lx out8=%lx "
+                            "out9=%lx",
+                            opcode, ret, outs[0], outs[1], outs[2], outs[3],
+                            outs[4], outs[5], outs[6], outs[7], outs[8]);
+               return ret;
+       }
+
+       return H_BUSY;
+}
+
+u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
+                            struct ehca_pfeq *pfeq,
+                            const u32 neq_control,
+                            const u32 number_of_entries,
+                            struct ipz_eq_handle *eq_handle,
+                            u32 *act_nr_of_entries,
+                            u32 *act_pages,
+                            u32 *eq_ist)
+{
+       u64 ret;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
+       u64 allocate_controls;
+
+       /* resource type: event queue */
+       allocate_controls = 3ULL;
+
+       /* ISN is associated */
+       if (neq_control != 1)
+               allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
+       else /* notification event queue */
+               allocate_controls = (1ULL << 63) | allocate_controls;
+
+       ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
+                               adapter_handle.handle,  /* r4 */
+                               allocate_controls,      /* r5 */
+                               number_of_entries,      /* r6 */
+                               0, 0, 0, 0, 0, 0);
+       eq_handle->handle = outs[0];
+       *act_nr_of_entries = (u32)outs[3];
+       *act_pages = (u32)outs[4];
+       *eq_ist = (u32)outs[5];
+
+       if (ret == H_NOT_ENOUGH_RESOURCES)
+               ehca_gen_err("Not enough resource - ret=%lx ", ret);
+
+       return ret;
+}
+
+u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
+                      struct ipz_eq_handle eq_handle,
+                      const u64 event_mask)
+{
+       return ehca_plpar_hcall_norets(H_RESET_EVENTS,
+                                      adapter_handle.handle, /* r4 */
+                                      eq_handle.handle,      /* r5 */
+                                      event_mask,            /* r6 */
+                                      0, 0, 0, 0);
+}
+
+u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
+                            struct ehca_cq *cq,
+                            struct ehca_alloc_cq_parms *param)
+{
+       u64 ret;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+       ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
+                               adapter_handle.handle,   /* r4  */
+                               2,                       /* r5  */
+                               param->eq_handle.handle, /* r6  */
+                               cq->token,               /* r7  */
+                               param->nr_cqe,           /* r8  */
+                               0, 0, 0, 0);
+       cq->ipz_cq_handle.handle = outs[0];
+       param->act_nr_of_entries = (u32)outs[3];
+       param->act_pages = (u32)outs[4];
+
+       if (ret == H_SUCCESS)
+               hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]);
+
+       if (ret == H_NOT_ENOUGH_RESOURCES)
+               ehca_gen_err("Not enough resources. ret=%lx", ret);
+
+       return ret;
+}
+
+u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
+                            struct ehca_qp *qp,
+                            struct ehca_alloc_qp_parms *parms)
+{
+       u64 ret;
+       u64 allocate_controls;
+       u64 max_r10_reg;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
+       u16 max_nr_receive_wqes = qp->init_attr.cap.max_recv_wr + 1;
+       u16 max_nr_send_wqes = qp->init_attr.cap.max_send_wr + 1;
+       int daqp_ctrl = parms->daqp_ctrl;
+
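+       /* pack the allocation controls for register r5 via the bitmask helpers */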
+       allocate_controls =
+               EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS,
+                              (daqp_ctrl & DAQP_CTRL_ENABLE) ? 1 : 0)
+               | EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
+               | EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
+               | EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
+               | EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
+                                (daqp_ctrl & DAQP_CTRL_RECV_COMP) ? 1 : 0)
+               | EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
+                                (daqp_ctrl & DAQP_CTRL_SEND_COMP) ? 1 : 0)
+               | EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
+                                parms->ud_av_l_key_ctl)
+               | EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);
+
+       max_r10_reg =
+               EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
+                              max_nr_send_wqes)
+               | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
+                                max_nr_receive_wqes)
+               | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
+                                parms->max_send_sge)
+               | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
+                                parms->max_recv_sge);
+
+       ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
+                               adapter_handle.handle,             /* r4  */
+                               allocate_controls,                 /* r5  */
+                               qp->send_cq->ipz_cq_handle.handle,
+                               qp->recv_cq->ipz_cq_handle.handle,
+                               parms->ipz_eq_handle.handle,
+                               ((u64)qp->token << 32) | parms->pd.value,
+                               max_r10_reg,                       /* r10 */
+                               parms->ud_av_l_key_ctl,            /* r11 */
+                               0);
+       qp->ipz_qp_handle.handle = outs[0];
+       qp->real_qp_num = (u32)outs[1];
+       parms->act_nr_send_wqes =
+               (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
+       parms->act_nr_recv_wqes =
+               (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
+       parms->act_nr_send_sges =
+               (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
+       parms->act_nr_recv_sges =
+               (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
+       parms->nr_sq_pages =
+               (u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
+       parms->nr_rq_pages =
+               (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
+
+       if (ret == H_SUCCESS)
+               hcp_galpas_ctor(&qp->galpas, outs[6], outs[6]);
+
+       if (ret == H_NOT_ENOUGH_RESOURCES)
+               ehca_gen_err("Not enough resources. ret=%lx", ret);
+
+       return ret;
+}
+
+u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
+                     const u8 port_id,
+                     struct hipz_query_port *query_port_response_block)
+{
+       u64 ret;
+       u64 r_cb = virt_to_abs(query_port_response_block);
+
+       if (r_cb & (EHCA_PAGESIZE-1)) {
+               ehca_gen_err("response block not page aligned");
+               return H_PARAMETER;
+       }
+
+       ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
+                                     adapter_handle.handle, /* r4 */
+                                     port_id,               /* r5 */
+                                     r_cb,                  /* r6 */
+                                     0, 0, 0, 0);
+
+       if (ehca_debug_level)
+               ehca_dmp(query_port_response_block, 64, "response_block");
+
+       return ret;
+}
+
+u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
+                    struct hipz_query_hca *query_hca_rblock)
+{
+       u64 r_cb = virt_to_abs(query_hca_rblock);
+
+       if (r_cb & (EHCA_PAGESIZE-1)) {
+               ehca_gen_err("response_block=%p not page aligned",
+                            query_hca_rblock);
+               return H_PARAMETER;
+       }
+
+       return ehca_plpar_hcall_norets(H_QUERY_HCA,
+                                      adapter_handle.handle, /* r4 */
+                                      r_cb,                  /* r5 */
+                                      0, 0, 0, 0, 0);
+}
+
+u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
+                         const u8 pagesize,
+                         const u8 queue_type,
+                         const u64 resource_handle,
+                         const u64 logical_address_of_page,
+                         u64 count)
+{
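+       /* r5 carries the queue type in bits 0..7 and the page size in bits 8..15 */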
+       return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
+                                      adapter_handle.handle,      /* r4  */
+                                      queue_type | pagesize << 8, /* r5  */
+                                      resource_handle,            /* r6  */
+                                      logical_address_of_page,    /* r7  */
+                                      count,                      /* r8  */
+                                      0, 0);
+}
+
+u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
+                            const struct ipz_eq_handle eq_handle,
+                            struct ehca_pfeq *pfeq,
+                            const u8 pagesize,
+                            const u8 queue_type,
+                            const u64 logical_address_of_page,
+                            const u64 count)
+{
+       if (count != 1) {
+               ehca_gen_err("Ppage counter=%lx", count);
+               return H_PARAMETER;
+       }
+       return hipz_h_register_rpage(adapter_handle,
+                                    pagesize,
+                                    queue_type,
+                                    eq_handle.handle,
+                                    logical_address_of_page, count);
+}
+
+u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
+                          u32 ist)
+{
+       u64 ret;
+       ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
+                                     adapter_handle.handle, /* r4 */
+                                     ist,                   /* r5 */
+                                     0, 0, 0, 0, 0);
+
+       if (ret != H_SUCCESS && ret != H_BUSY)
+               ehca_gen_err("Could not query interrupt state.");
+
+       return ret;
+}
+
+u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
+                            const struct ipz_cq_handle cq_handle,
+                            struct ehca_pfcq *pfcq,
+                            const u8 pagesize,
+                            const u8 queue_type,
+                            const u64 logical_address_of_page,
+                            const u64 count,
+                            const struct h_galpa gal)
+{
+       if (count != 1) {
+               ehca_gen_err("Page counter=%lx", count);
+               return H_PARAMETER;
+       }
+
+       return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
+                                    cq_handle.handle, logical_address_of_page,
+                                    count);
+}
+
+u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
+                            const struct ipz_qp_handle qp_handle,
+                            struct ehca_pfqp *pfqp,
+                            const u8 pagesize,
+                            const u8 queue_type,
+                            const u64 logical_address_of_page,
+                            const u64 count,
+                            const struct h_galpa galpa)
+{
+       if (count != 1) {
+               ehca_gen_err("Page counter=%lx", count);
+               return H_PARAMETER;
+       }
+
+       return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
+                                    qp_handle.handle, logical_address_of_page,
+                                    count);
+}
+
+u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
+                              const struct ipz_qp_handle qp_handle,
+                              struct ehca_pfqp *pfqp,
+                              void **log_addr_next_sq_wqe2processed,
+                              void **log_addr_next_rq_wqe2processed,
+                              int dis_and_get_function_code)
+{
+       u64 ret;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+       ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
+                               adapter_handle.handle,     /* r4 */
+                               dis_and_get_function_code, /* r5 */
+                               qp_handle.handle,          /* r6 */
+                               0, 0, 0, 0, 0, 0);
+       if (log_addr_next_sq_wqe2processed)
+               *log_addr_next_sq_wqe2processed = (void*)outs[0];
+       if (log_addr_next_rq_wqe2processed)
+               *log_addr_next_rq_wqe2processed = (void*)outs[1];
+
+       return ret;
+}
+
+u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
+                    const struct ipz_qp_handle qp_handle,
+                    struct ehca_pfqp *pfqp,
+                    const u64 update_mask,
+                    struct hcp_modify_qp_control_block *mqpcb,
+                    struct h_galpa gal)
+{
+       u64 ret;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
+       ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
+                               adapter_handle.handle, /* r4 */
+                               qp_handle.handle,      /* r5 */
+                               update_mask,           /* r6 */
+                               virt_to_abs(mqpcb),    /* r7 */
+                               0, 0, 0, 0, 0);
+
+       if (ret == H_NOT_ENOUGH_RESOURCES)
+               ehca_gen_err("Insufficient resources ret=%lx", ret);
+
+       return ret;
+}
+
+u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
+                   const struct ipz_qp_handle qp_handle,
+                   struct ehca_pfqp *pfqp,
+                   struct hcp_modify_qp_control_block *qqpcb,
+                   struct h_galpa gal)
+{
+       return ehca_plpar_hcall_norets(H_QUERY_QP,
+                                      adapter_handle.handle, /* r4 */
+                                      qp_handle.handle,      /* r5 */
+                                      virt_to_abs(qqpcb),    /* r6 */
+                                      0, 0, 0, 0);
+}
+
+u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
+                     struct ehca_qp *qp)
+{
+       u64 ret;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+       ret = hcp_galpas_dtor(&qp->galpas);
+       if (ret) {
+               ehca_gen_err("Could not destruct qp->galpas");
+               return H_RESOURCE;
+       }
+       ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
+                               adapter_handle.handle,     /* r4 */
+                               /* function code */
+                               1,                         /* r5 */
+                               qp->ipz_qp_handle.handle,  /* r6 */
+                               0, 0, 0, 0, 0, 0);
+       if (ret == H_HARDWARE)
+               ehca_gen_err("HCA not operational. ret=%lx", ret);
+
+       ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
+                                     adapter_handle.handle,     /* r4 */
+                                     qp->ipz_qp_handle.handle,  /* r5 */
+                                     0, 0, 0, 0, 0);
+
+       if (ret == H_RESOURCE)
+               ehca_gen_err("Resource still in use. ret=%lx", ret);
+
+       return ret;
+}
+
+u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
+                      const struct ipz_qp_handle qp_handle,
+                      struct h_galpa gal,
+                      u32 port)
+{
+       return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
+                                      adapter_handle.handle, /* r4 */
+                                      qp_handle.handle,      /* r5 */
+                                      port,                  /* r6 */
+                                      0, 0, 0, 0);
+}
+
+u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
+                      const struct ipz_qp_handle qp_handle,
+                      struct h_galpa gal,
+                      u32 port, u32 *pma_qp_nr,
+                      u32 *bma_qp_nr)
+{
+       u64 ret;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+       ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
+                               adapter_handle.handle, /* r4 */
+                               qp_handle.handle,      /* r5 */
+                               port,                  /* r6 */
+                               0, 0, 0, 0, 0, 0);
+       *pma_qp_nr = (u32)outs[0];
+       *bma_qp_nr = (u32)outs[1];
+
+       if (ret == H_ALIAS_EXIST)
+               ehca_gen_err("AQP1 already exists. ret=%lx", ret);
+
+       return ret;
+}
+
+u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
+                      const struct ipz_qp_handle qp_handle,
+                      struct h_galpa gal,
+                      u16 mcg_dlid,
+                      u64 subnet_prefix, u64 interface_id)
+{
+       u64 ret;
+
+       ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
+                                     adapter_handle.handle,  /* r4 */
+                                     qp_handle.handle,       /* r5 */
+                                     mcg_dlid,               /* r6 */
+                                     interface_id,           /* r7 */
+                                     subnet_prefix,          /* r8 */
+                                     0, 0);
+
+       if (ret == H_NOT_ENOUGH_RESOURCES)
+               ehca_gen_err("Not enough resources. ret=%lx", ret);
+
+       return ret;
+}
+
+u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
+                      const struct ipz_qp_handle qp_handle,
+                      struct h_galpa gal,
+                      u16 mcg_dlid,
+                      u64 subnet_prefix, u64 interface_id)
+{
+       return ehca_plpar_hcall_norets(H_DETACH_MCQP,
+                                      adapter_handle.handle, /* r4 */
+                                      qp_handle.handle,      /* r5 */
+                                      mcg_dlid,              /* r6 */
+                                      interface_id,          /* r7 */
+                                      subnet_prefix,         /* r8 */
+                                      0, 0);
+}
+
+u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
+                     struct ehca_cq *cq,
+                     u8 force_flag)
+{
+       u64 ret;
+
+       ret = hcp_galpas_dtor(&cq->galpas);
+       if (ret) {
+               ehca_gen_err("Could not destruct cp->galpas");
+               return H_RESOURCE;
+       }
+
+       ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
+                                     adapter_handle.handle,     /* r4 */
+                                     cq->ipz_cq_handle.handle,  /* r5 */
+                                     force_flag != 0 ? 1L : 0L, /* r6 */
+                                     0, 0, 0, 0);
+
+       if (ret == H_RESOURCE)
+               ehca_gen_err("H_FREE_RESOURCE failed ret=%lx ", ret);
+
+       return ret;
+}
+
+u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
+                     struct ehca_eq *eq)
+{
+       u64 ret;
+
+       ret = hcp_galpas_dtor(&eq->galpas);
+       if (ret) {
+               ehca_gen_err("Could not destruct eq->galpas");
+               return H_RESOURCE;
+       }
+
+       ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
+                                     adapter_handle.handle,     /* r4 */
+                                     eq->ipz_eq_handle.handle,  /* r5 */
+                                     0, 0, 0, 0, 0);
+
+       if (ret == H_RESOURCE)
+               ehca_gen_err("Resource in use. ret=%lx ", ret);
+
+       return ret;
+}
+
+u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
+                            const struct ehca_mr *mr,
+                            const u64 vaddr,
+                            const u64 length,
+                            const u32 access_ctrl,
+                            const struct ipz_pd pd,
+                            struct ehca_mr_hipzout_parms *outparms)
+{
+       u64 ret;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+       ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
+                               adapter_handle.handle,            /* r4 */
+                               5,                                /* r5 */
+                               vaddr,                            /* r6 */
+                               length,                           /* r7 */
+                               (((u64)access_ctrl) << 32ULL),    /* r8 */
+                               pd.value,                         /* r9 */
+                               0, 0, 0);
+       outparms->handle.handle = outs[0];
+       outparms->lkey = (u32)outs[2];
+       outparms->rkey = (u32)outs[3];
+
+       return ret;
+}
+
+u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
+                            const struct ehca_mr *mr,
+                            const u8 pagesize,
+                            const u8 queue_type,
+                            const u64 logical_address_of_page,
+                            const u64 count)
+{
+       u64 ret;
+
+       if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
+               ehca_gen_err("logical_address_of_page not on a 4k boundary "
+                            "adapter_handle=%lx mr=%p mr_handle=%lx "
+                            "pagesize=%x queue_type=%x "
+                            "logical_address_of_page=%lx count=%lx",
+                            adapter_handle.handle, mr,
+                            mr->ipz_mr_handle.handle, pagesize, queue_type,
+                            logical_address_of_page, count);
+               ret = H_PARAMETER;
+       } else
+               ret = hipz_h_register_rpage(adapter_handle, pagesize,
+                                           queue_type,
+                                           mr->ipz_mr_handle.handle,
+                                           logical_address_of_page, count);
+       return ret;
+}
+
+u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
+                   const struct ehca_mr *mr,
+                   struct ehca_mr_hipzout_parms *outparms)
+{
+       u64 ret;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+       ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
+                               adapter_handle.handle,     /* r4 */
+                               mr->ipz_mr_handle.handle,  /* r5 */
+                               0, 0, 0, 0, 0, 0, 0);
+       outparms->len = outs[0];
+       outparms->vaddr = outs[1];
+       outparms->acl  = outs[4] >> 32;
+       outparms->lkey = (u32)(outs[5] >> 32);
+       outparms->rkey = (u32)(outs[5] & (0xffffffff));
+
+       return ret;
+}
+
+u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
+                           const struct ehca_mr *mr)
+{
+       return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
+                                      adapter_handle.handle,    /* r4 */
+                                      mr->ipz_mr_handle.handle, /* r5 */
+                                      0, 0, 0, 0, 0);
+}
+
+u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
+                         const struct ehca_mr *mr,
+                         const u64 vaddr_in,
+                         const u64 length,
+                         const u32 access_ctrl,
+                         const struct ipz_pd pd,
+                         const u64 mr_addr_cb,
+                         struct ehca_mr_hipzout_parms *outparms)
+{
+       u64 ret;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+       ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
+                               adapter_handle.handle,    /* r4 */
+                               mr->ipz_mr_handle.handle, /* r5 */
+                               vaddr_in,                 /* r6 */
+                               length,                   /* r7 */
+                               /* r8 */
+                               ((((u64)access_ctrl) << 32ULL) | pd.value),
+                               mr_addr_cb,               /* r9 */
+                               0, 0, 0);
+       outparms->vaddr = outs[1];
+       outparms->lkey = (u32)outs[2];
+       outparms->rkey = (u32)outs[3];
+
+       return ret;
+}
+
+u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
+                       const struct ehca_mr *mr,
+                       const struct ehca_mr *orig_mr,
+                       const u64 vaddr_in,
+                       const u32 access_ctrl,
+                       const struct ipz_pd pd,
+                       struct ehca_mr_hipzout_parms *outparms)
+{
+       u64 ret;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+       ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
+                               adapter_handle.handle,            /* r4 */
+                               orig_mr->ipz_mr_handle.handle,    /* r5 */
+                               vaddr_in,                         /* r6 */
+                               (((u64)access_ctrl) << 32ULL),    /* r7 */
+                               pd.value,                         /* r8 */
+                               0, 0, 0, 0);
+       outparms->handle.handle = outs[0];
+       outparms->lkey = (u32)outs[2];
+       outparms->rkey = (u32)outs[3];
+
+       return ret;
+}
+
+u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
+                            const struct ehca_mw *mw,
+                            const struct ipz_pd pd,
+                            struct ehca_mw_hipzout_parms *outparms)
+{
+       u64 ret;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+       ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
+                               adapter_handle.handle,      /* r4 */
+                               6,                          /* r5 */
+                               pd.value,                   /* r6 */
+                               0, 0, 0, 0, 0, 0);
+       outparms->handle.handle = outs[0];
+       outparms->rkey = (u32)outs[3];
+
+       return ret;
+}
+
+u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
+                   const struct ehca_mw *mw,
+                   struct ehca_mw_hipzout_parms *outparms)
+{
+       u64 ret;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+       ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
+                               adapter_handle.handle,    /* r4 */
+                               mw->ipz_mw_handle.handle, /* r5 */
+                               0, 0, 0, 0, 0, 0, 0);
+       outparms->rkey = (u32)outs[3];
+
+       return ret;
+}
+
+u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
+                           const struct ehca_mw *mw)
+{
+       return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
+                                      adapter_handle.handle,    /* r4 */
+                                      mw->ipz_mw_handle.handle, /* r5 */
+                                      0, 0, 0, 0, 0);
+}
+
+u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
+                     const u64 resource_handle,
+                     void *rblock,
+                     unsigned long *byte_count)
+{
+       u64 r_cb = virt_to_abs(rblock);
+
+       if (r_cb & (EHCA_PAGESIZE-1)) {
+               ehca_gen_err("rblock not page aligned.");
+               return H_PARAMETER;
+       }
+
+       return ehca_plpar_hcall_norets(H_ERROR_DATA,
+                                      adapter_handle.handle,
+                                      resource_handle,
+                                      r_cb,
+                                      0, 0, 0, 0);
+}
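+
+/*
+ * Editor's note, not part of the original patch: H_ERROR_DATA rejects an
+ * rblock that is not page aligned, so a caller would typically hand in a
+ * whole zeroed kernel page (sketch; shca and my_qp are assumed names):
+ *
+ *     u64 *rblock = (u64 *)get_zeroed_page(GFP_KERNEL);
+ *     unsigned long bytes;
+ *
+ *     if (rblock) {
+ *             hipz_h_error_data(shca->ipz_hca_handle,
+ *                               my_qp->ipz_qp_handle.handle,
+ *                               rblock, &bytes);
+ *             free_page((unsigned long)rblock);
+ *     }
+ */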
diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/infiniband/hw/ehca/hcp_if.h
new file mode 100644 (file)
index 0000000..587ebd4
--- /dev/null
@@ -0,0 +1,261 @@
+/*
+ *  IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ *  Firmware Infiniband Interface code for POWER
+ *
+ *  Authors: Christoph Raisch <raisch@de.ibm.com>
+ *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *           Gerd Bayer <gerd.bayer@de.ibm.com>
+ *           Waleri Fomin <fomin@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __HCP_IF_H__
+#define __HCP_IF_H__
+
+#include "ehca_classes.h"
+#include "ehca_tools.h"
+#include "hipz_hw.h"
+
+/*
+ * hipz_h_alloc_resource_eq allocates EQ resources in HW and FW,
+ * initializes the resources and creates the empty EQPT (ring).
+ */
+u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
+                            struct ehca_pfeq *pfeq,
+                            const u32 neq_control,
+                            const u32 number_of_entries,
+                            struct ipz_eq_handle *eq_handle,
+                            u32 *act_nr_of_entries,
+                            u32 *act_pages,
+                            u32 *eq_ist);
+
+u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
+                      struct ipz_eq_handle eq_handle,
+                      const u64 event_mask);
+/*
+ * hipz_h_alloc_resource_cq allocates CQ resources in HW and FW,
+ * initializes the resources and creates the empty CQPT (ring).
+ */
+u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
+                            struct ehca_cq *cq,
+                            struct ehca_alloc_cq_parms *param);
+
+
+/*
+ * hipz_h_alloc_resource_qp allocates QP resources in HW and FW,
+ * initializes the resources and creates the empty QPPTs (2 rings).
+ */
+u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
+                            struct ehca_qp *qp,
+                            struct ehca_alloc_qp_parms *parms);
+
+u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
+                     const u8 port_id,
+                     struct hipz_query_port *query_port_response_block);
+
+u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
+                    struct hipz_query_hca *query_hca_rblock);
+
+/*
+ * hipz_h_register_rpage is the internal function used by all
+ * H_REGISTER_RPAGE calls.
+ */
+u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
+                         const u8 pagesize,
+                         const u8 queue_type,
+                         const u64 resource_handle,
+                         const u64 logical_address_of_page,
+                         u64 count);
+
+u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
+                            const struct ipz_eq_handle eq_handle,
+                            struct ehca_pfeq *pfeq,
+                            const u8 pagesize,
+                            const u8 queue_type,
+                            const u64 logical_address_of_page,
+                            const u64 count);
+
+u64 hipz_h_query_int_state(const struct ipz_adapter_handle hcp_adapter_handle,
+                          u32 ist);
+
+u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
+                            const struct ipz_cq_handle cq_handle,
+                            struct ehca_pfcq *pfcq,
+                            const u8 pagesize,
+                            const u8 queue_type,
+                            const u64 logical_address_of_page,
+                            const u64 count,
+                            const struct h_galpa gal);
+
+u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
+                            const struct ipz_qp_handle qp_handle,
+                            struct ehca_pfqp *pfqp,
+                            const u8 pagesize,
+                            const u8 queue_type,
+                            const u64 logical_address_of_page,
+                            const u64 count,
+                            const struct h_galpa galpa);
+
+u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
+                              const struct ipz_qp_handle qp_handle,
+                              struct ehca_pfqp *pfqp,
+                              void **log_addr_next_sq_wqe_tb_processed,
+                              void **log_addr_next_rq_wqe_tb_processed,
+                              int dis_and_get_function_code);
+enum hcall_sigt {
+       HCALL_SIGT_NO_CQE = 0,
+       HCALL_SIGT_BY_WQE = 1,
+       HCALL_SIGT_EVERY = 2
+};
+
+u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
+                    const struct ipz_qp_handle qp_handle,
+                    struct ehca_pfqp *pfqp,
+                    const u64 update_mask,
+                    struct hcp_modify_qp_control_block *mqpcb,
+                    struct h_galpa gal);
+
+u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
+                   const struct ipz_qp_handle qp_handle,
+                   struct ehca_pfqp *pfqp,
+                   struct hcp_modify_qp_control_block *qqpcb,
+                   struct h_galpa gal);
+
+u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
+                     struct ehca_qp *qp);
+
+u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
+                      const struct ipz_qp_handle qp_handle,
+                      struct h_galpa gal,
+                      u32 port);
+
+u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
+                      const struct ipz_qp_handle qp_handle,
+                      struct h_galpa gal,
+                      u32 port, u32 *pma_qp_nr,
+                      u32 *bma_qp_nr);
+
+u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
+                      const struct ipz_qp_handle qp_handle,
+                      struct h_galpa gal,
+                      u16 mcg_dlid,
+                      u64 subnet_prefix, u64 interface_id);
+
+u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
+                      const struct ipz_qp_handle qp_handle,
+                      struct h_galpa gal,
+                      u16 mcg_dlid,
+                      u64 subnet_prefix, u64 interface_id);
+
+u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
+                     struct ehca_cq *cq,
+                     u8 force_flag);
+
+u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
+                     struct ehca_eq *eq);
+
+/*
+ * hipz_h_alloc_resource_mr allocates MR resources in HW and FW and
+ * initializes them.
+ */
+u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
+                            const struct ehca_mr *mr,
+                            const u64 vaddr,
+                            const u64 length,
+                            const u32 access_ctrl,
+                            const struct ipz_pd pd,
+                            struct ehca_mr_hipzout_parms *outparms);
+
+/* hipz_h_register_rpage_mr registers MR resource pages in HW and FW */
+u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
+                            const struct ehca_mr *mr,
+                            const u8 pagesize,
+                            const u8 queue_type,
+                            const u64 logical_address_of_page,
+                            const u64 count);
+
+/* hipz_h_query_mr queries MR in HW and FW */
+u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
+                   const struct ehca_mr *mr,
+                   struct ehca_mr_hipzout_parms *outparms);
+
+/* hipz_h_free_resource_mr frees MR resources in HW and FW */
+u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
+                           const struct ehca_mr *mr);
+
+/* hipz_h_reregister_pmr reregisters MR in HW and FW */
+u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
+                         const struct ehca_mr *mr,
+                         const u64 vaddr_in,
+                         const u64 length,
+                         const u32 access_ctrl,
+                         const struct ipz_pd pd,
+                         const u64 mr_addr_cb,
+                         struct ehca_mr_hipzout_parms *outparms);
+
+/* hipz_h_register_smr registers a shared MR in HW and FW */
+u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
+                       const struct ehca_mr *mr,
+                       const struct ehca_mr *orig_mr,
+                       const u64 vaddr_in,
+                       const u32 access_ctrl,
+                       const struct ipz_pd pd,
+                       struct ehca_mr_hipzout_parms *outparms);
+
+/*
+ * hipz_h_alloc_resource_mw allocates MW resources in HW and FW and
+ * initializes them.
+ */
+u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
+                            const struct ehca_mw *mw,
+                            const struct ipz_pd pd,
+                            struct ehca_mw_hipzout_parms *outparms);
+
+/* hipz_h_query_mw queries MW in HW and FW */
+u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
+                   const struct ehca_mw *mw,
+                   struct ehca_mw_hipzout_parms *outparms);
+
+/* hipz_h_free_resource_mw frees MW resources in HW and FW */
+u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
+                           const struct ehca_mw *mw);
+
+u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
+                     const u64 resource_handle,
+                     void *rblock,
+                     unsigned long *byte_count);
+
+#endif /* __HCP_IF_H__ */
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/infiniband/hw/ehca/hcp_phyp.c
new file mode 100644 (file)
index 0000000..0b1a477
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ *  IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ *   load store abstraction for ehca register access with tracing
+ *
+ *  Authors: Christoph Raisch <raisch@de.ibm.com>
+ *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ehca_classes.h"
+#include "hipz_hw.h"
+
+int hcall_map_page(u64 physaddr, u64 *mapaddr)
+{
+       *mapaddr = (u64)(ioremap(physaddr, EHCA_PAGESIZE));
+       if (!*mapaddr)
+               return -ENOMEM;
+       return 0;
+}
+
+int hcall_unmap_page(u64 mapaddr)
+{
+       iounmap((volatile void __iomem*)mapaddr);
+       return 0;
+}
+
+int hcp_galpas_ctor(struct h_galpas *galpas,
+                   u64 paddr_kernel, u64 paddr_user)
+{
+       int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle);
+       if (ret)
+               return ret;
+
+       galpas->user.fw_handle = paddr_user;
+
+       return 0;
+}
+
+int hcp_galpas_dtor(struct h_galpas *galpas)
+{
+       if (galpas->kernel.fw_handle) {
+               int ret = hcall_unmap_page(galpas->kernel.fw_handle);
+               if (ret)
+                       return ret;
+       }
+
+       galpas->user.fw_handle = galpas->kernel.fw_handle = 0;
+
+       return 0;
+}
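+
+/*
+ * Editor's note, not part of the original patch: a resource that owns a
+ * struct h_galpas typically brackets its lifetime like this (sketch;
+ * my_cq and the paddr_* values are assumed names):
+ *
+ *     if (hcp_galpas_ctor(&my_cq->galpas, paddr_kernel, paddr_user))
+ *             goto cleanup;
+ *     ...
+ *     value = hipz_galpa_load(my_cq->galpas.kernel, offset);
+ *     ...
+ *     hcp_galpas_dtor(&my_cq->galpas);
+ */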
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/infiniband/hw/ehca/hcp_phyp.h
new file mode 100644 (file)
index 0000000..5305c2a
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ *  IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ *  Firmware calls
+ *
+ *  Authors: Christoph Raisch <raisch@de.ibm.com>
+ *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *           Waleri Fomin <fomin@de.ibm.com>
+ *           Gerd Bayer <gerd.bayer@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __HCP_PHYP_H__
+#define __HCP_PHYP_H__
+
+
+/*
+ * eHCA page (mapped into memory)
+ * resource to access eHCA register pages in CPU address space
+ */
+struct h_galpa {
+       u64 fw_handle;
+       /* for pSeries this is a 64-bit memory address where
+        * I/O memory is mapped into CPU address space (kv) */
+};
+
+/*
+ * resource to access eHCA address space registers, all types
+ */
+struct h_galpas {
+       u32 pid;                /* PID of userspace galpa checking */
+       struct h_galpa user;    /* user space accessible resource,
+                                  set to 0 if unused */
+       struct h_galpa kernel;  /* kernel space accessible resource,
+                                  set to 0 if unused */
+};
+
+static inline u64 hipz_galpa_load(struct h_galpa galpa, u32 offset)
+{
+       u64 addr = galpa.fw_handle + offset;
+       return *(volatile u64 __force *)addr;
+}
+
+static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value)
+{
+       u64 addr = galpa.fw_handle + offset;
+       *(volatile u64 __force *)addr = value;
+}
+
+int hcp_galpas_ctor(struct h_galpas *galpas,
+                   u64 paddr_kernel, u64 paddr_user);
+
+int hcp_galpas_dtor(struct h_galpas *galpas);
+
+int hcall_map_page(u64 physaddr, u64 *mapaddr);
+
+int hcall_unmap_page(u64 mapaddr);
+
+#endif
diff --git a/drivers/infiniband/hw/ehca/hipz_fns.h b/drivers/infiniband/hw/ehca/hipz_fns.h
new file mode 100644 (file)
index 0000000..9dac93d
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ *  IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ *  HW abstraction register functions
+ *
+ *  Authors: Christoph Raisch <raisch@de.ibm.com>
+ *           Reinhard Ernst <rernst@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __HIPZ_FNS_H__
+#define __HIPZ_FNS_H__
+
+#include "ehca_classes.h"
+#include "hipz_hw.h"
+
+#include "hipz_fns_core.h"
+
+#define hipz_galpa_store_eq(gal, offset, value) \
+       hipz_galpa_store(gal, EQTEMM_OFFSET(offset), value)
+
+#define hipz_galpa_load_eq(gal, offset) \
+       hipz_galpa_load(gal, EQTEMM_OFFSET(offset))
+
+#define hipz_galpa_store_qped(gal, offset, value) \
+       hipz_galpa_store(gal, QPEDMM_OFFSET(offset), value)
+
+#define hipz_galpa_load_qped(gal, offset) \
+       hipz_galpa_load(gal, QPEDMM_OFFSET(offset))
+
+#define hipz_galpa_store_mrmw(gal, offset, value) \
+       hipz_galpa_store(gal, MRMWMM_OFFSET(offset), value)
+
+#define hipz_galpa_load_mrmw(gal, offset) \
+       hipz_galpa_load(gal, MRMWMM_OFFSET(offset))
+
+#endif
diff --git a/drivers/infiniband/hw/ehca/hipz_fns_core.h b/drivers/infiniband/hw/ehca/hipz_fns_core.h
new file mode 100644 (file)
index 0000000..20898a1
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ *  IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ *  HW abstraction register functions
+ *
+ *  Authors: Christoph Raisch <raisch@de.ibm.com>
+ *           Heiko J Schick <schickhj@de.ibm.com>
+ *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *           Reinhard Ernst <rernst@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __HIPZ_FNS_CORE_H__
+#define __HIPZ_FNS_CORE_H__
+
+#include "hcp_phyp.h"
+#include "hipz_hw.h"
+
+#define hipz_galpa_store_cq(gal, offset, value) \
+       hipz_galpa_store(gal, CQTEMM_OFFSET(offset), value)
+
+#define hipz_galpa_load_cq(gal, offset) \
+       hipz_galpa_load(gal, CQTEMM_OFFSET(offset))
+
+#define hipz_galpa_store_qp(gal, offset, value) \
+       hipz_galpa_store(gal, QPTEMM_OFFSET(offset), value)
+
+#define hipz_galpa_load_qp(gal, offset) \
+       hipz_galpa_load(gal, QPTEMM_OFFSET(offset))
+
+static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes)
+{
+       /*  ringing doorbell :-) */
+       hipz_galpa_store_qp(qp->galpas.kernel, qpx_sqa,
+                           EHCA_BMASK_SET(QPX_SQADDER, nr_wqes));
+}
+
+static inline void hipz_update_rqa(struct ehca_qp *qp, u16 nr_wqes)
+{
+       /*  ringing doorbell :-) */
+       hipz_galpa_store_qp(qp->galpas.kernel, qpx_rqa,
+                           EHCA_BMASK_SET(QPX_RQADDER, nr_wqes));
+}
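+
+/*
+ * Editor's note, not part of the original patch: a post_send/post_recv
+ * path would typically write a batch of WQEs into the queue memory first
+ * and then ring the doorbell once for the whole batch, e.g.
+ * hipz_update_sqa(my_qp, wqe_cnt) after wqe_cnt send WQEs (names assumed).
+ */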
+
+static inline void hipz_update_feca(struct ehca_cq *cq, u32 nr_cqes)
+{
+       hipz_galpa_store_cq(cq->galpas.kernel, cqx_feca,
+                           EHCA_BMASK_SET(CQX_FECADDER, nr_cqes));
+}
+
+static inline void hipz_set_cqx_n0(struct ehca_cq *cq, u32 value)
+{
+       u64 cqx_n0_reg;
+
+       hipz_galpa_store_cq(cq->galpas.kernel, cqx_n0,
+                           EHCA_BMASK_SET(CQX_N0_GENERATE_SOLICITED_COMP_EVENT,
+                                          value));
+       cqx_n0_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n0);
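+       /*
+        * editor's note: the read back presumably flushes the preceding
+        * store to the adapter; the value itself is not used
+        */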
+}
+
+static inline void hipz_set_cqx_n1(struct ehca_cq *cq, u32 value)
+{
+       u64 cqx_n1_reg;
+
+       hipz_galpa_store_cq(cq->galpas.kernel, cqx_n1,
+                           EHCA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, value));
+       cqx_n1_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n1);
+}
+
+#endif /* __HIPZ_FNS_CORE_H__ */
diff --git a/drivers/infiniband/hw/ehca/hipz_hw.h b/drivers/infiniband/hw/ehca/hipz_hw.h
new file mode 100644 (file)
index 0000000..3fc92b0
--- /dev/null
@@ -0,0 +1,388 @@
+/*
+ *  IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ *  eHCA register definitions
+ *
+ *  Authors: Waleri Fomin <fomin@de.ibm.com>
+ *           Christoph Raisch <raisch@de.ibm.com>
+ *           Reinhard Ernst <rernst@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __HIPZ_HW_H__
+#define __HIPZ_HW_H__
+
+#include "ehca_tools.h"
+
+/* QP Table Entry Memory Map */
+struct hipz_qptemm {
+       u64 qpx_hcr;
+       u64 qpx_c;
+       u64 qpx_herr;
+       u64 qpx_aer;
+/* 0x20*/
+       u64 qpx_sqa;
+       u64 qpx_sqc;
+       u64 qpx_rqa;
+       u64 qpx_rqc;
+/* 0x40*/
+       u64 qpx_st;
+       u64 qpx_pmstate;
+       u64 qpx_pmfa;
+       u64 qpx_pkey;
+/* 0x60*/
+       u64 qpx_pkeya;
+       u64 qpx_pkeyb;
+       u64 qpx_pkeyc;
+       u64 qpx_pkeyd;
+/* 0x80*/
+       u64 qpx_qkey;
+       u64 qpx_dqp;
+       u64 qpx_dlidp;
+       u64 qpx_portp;
+/* 0xa0*/
+       u64 qpx_slidp;
+       u64 qpx_slidpp;
+       u64 qpx_dlida;
+       u64 qpx_porta;
+/* 0xc0*/
+       u64 qpx_slida;
+       u64 qpx_slidpa;
+       u64 qpx_slvl;
+       u64 qpx_ipd;
+/* 0xe0*/
+       u64 qpx_mtu;
+       u64 qpx_lato;
+       u64 qpx_rlimit;
+       u64 qpx_rnrlimit;
+/* 0x100*/
+       u64 qpx_t;
+       u64 qpx_sqhp;
+       u64 qpx_sqptp;
+       u64 qpx_nspsn;
+/* 0x120*/
+       u64 qpx_nspsnhwm;
+       u64 reserved1;
+       u64 qpx_sdsi;
+       u64 qpx_sdsbc;
+/* 0x140*/
+       u64 qpx_sqwsize;
+       u64 qpx_sqwts;
+       u64 qpx_lsn;
+       u64 qpx_nssn;
+/* 0x160 */
+       u64 qpx_mor;
+       u64 qpx_cor;
+       u64 qpx_sqsize;
+       u64 qpx_erc;
+/* 0x180*/
+       u64 qpx_rnrrc;
+       u64 qpx_ernrwt;
+       u64 qpx_rnrresp;
+       u64 qpx_lmsna;
+/* 0x1a0 */
+       u64 qpx_sqhpc;
+       u64 qpx_sqcptp;
+       u64 qpx_sigt;
+       u64 qpx_wqecnt;
+/* 0x1c0*/
+       u64 qpx_rqhp;
+       u64 qpx_rqptp;
+       u64 qpx_rqsize;
+       u64 qpx_nrr;
+/* 0x1e0*/
+       u64 qpx_rdmac;
+       u64 qpx_nrpsn;
+       u64 qpx_lapsn;
+       u64 qpx_lcr;
+/* 0x200*/
+       u64 qpx_rwc;
+       u64 qpx_rwva;
+       u64 qpx_rdsi;
+       u64 qpx_rdsbc;
+/* 0x220*/
+       u64 qpx_rqwsize;
+       u64 qpx_crmsn;
+       u64 qpx_rdd;
+       u64 qpx_larpsn;
+/* 0x240*/
+       u64 qpx_pd;
+       u64 qpx_scqn;
+       u64 qpx_rcqn;
+       u64 qpx_aeqn;
+/* 0x260*/
+       u64 qpx_aaelog;
+       u64 qpx_ram;
+       u64 qpx_rdmaqe0;
+       u64 qpx_rdmaqe1;
+/* 0x280*/
+       u64 qpx_rdmaqe2;
+       u64 qpx_rdmaqe3;
+       u64 qpx_nrpsnhwm;
+/* 0x298*/
+       u64 reserved[(0x400 - 0x298) / 8];
+/* 0x400 extended data */
+       u64 reserved_ext[(0x500 - 0x400) / 8];
+/* 0x500 */
+       u64 reserved2[(0x1000 - 0x500) / 8];
+/* 0x1000      */
+};
+
+#define QPX_SQADDER EHCA_BMASK_IBM(48,63)
+#define QPX_RQADDER EHCA_BMASK_IBM(48,63)
+
+#define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm,x)
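+
+/*
+ * Editor's note, not part of the original patch: EHCA_BMASK_IBM() uses
+ * IBM MSB-0 bit numbering, where bit 0 is the most significant of the 64
+ * bits, so EHCA_BMASK_IBM(48,63) covers the 16 least significant bits.
+ * A sketch of what the helpers in ehca_tools.h (not shown in this hunk)
+ * plausibly amount to:
+ *
+ *     #define EHCA_BMASK_IBM(from, to) (((63 - (to)) << 16) | ((to) - (from) + 1))
+ *     #define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
+ *     #define EHCA_BMASK_MASK(mask) (~0ULL >> ((64 - (mask)) & 0xffff))
+ *     #define EHCA_BMASK_SET(mask, value) \
+ *             ((EHCA_BMASK_MASK(mask) & ((u64)(value))) << EHCA_BMASK_SHIFTPOS(mask))
+ */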
+
+/* MRMWPT Entry Memory Map */
+struct hipz_mrmwmm {
+       /* 0x00 */
+       u64 mrx_hcr;
+
+       u64 mrx_c;
+       u64 mrx_herr;
+       u64 mrx_aer;
+       /* 0x20 */
+       u64 mrx_pp;
+       u64 reserved1;
+       u64 reserved2;
+       u64 reserved3;
+       /* 0x40 */
+       u64 reserved4[(0x200 - 0x40) / 8];
+       /* 0x200 */
+       u64 mrx_ctl[64];
+};
+
+#define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm,x)
+
+struct hipz_qpedmm {
+       /* 0x00 */
+       u64 reserved0[(0x400) / 8];
+       /* 0x400 */
+       u64 qpedx_phh;
+       u64 qpedx_ppsgp;
+       /* 0x410 */
+       u64 qpedx_ppsgu;
+       u64 qpedx_ppdgp;
+       /* 0x420 */
+       u64 qpedx_ppdgu;
+       u64 qpedx_aph;
+       /* 0x430 */
+       u64 qpedx_apsgp;
+       u64 qpedx_apsgu;
+       /* 0x440 */
+       u64 qpedx_apdgp;
+       u64 qpedx_apdgu;
+       /* 0x450 */
+       u64 qpedx_apav;
+       u64 qpedx_apsav;
+       /* 0x460  */
+       u64 qpedx_hcr;
+       u64 reserved1[4];
+       /* 0x488 */
+       u64 qpedx_rrl0;
+       /* 0x490 */
+       u64 qpedx_rrrkey0;
+       u64 qpedx_rrva0;
+       /* 0x4a0 */
+       u64 reserved2;
+       u64 qpedx_rrl1;
+       /* 0x4b0 */
+       u64 qpedx_rrrkey1;
+       u64 qpedx_rrva1;
+       /* 0x4c0 */
+       u64 reserved3;
+       u64 qpedx_rrl2;
+       /* 0x4d0 */
+       u64 qpedx_rrrkey2;
+       u64 qpedx_rrva2;
+       /* 0x4e0 */
+       u64 reserved4;
+       u64 qpedx_rrl3;
+       /* 0x4f0 */
+       u64 qpedx_rrrkey3;
+       u64 qpedx_rrva3;
+};
+
+#define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm,x)
+
+/* CQ Table Entry Memory Map */
+struct hipz_cqtemm {
+       u64 cqx_hcr;
+       u64 cqx_c;
+       u64 cqx_herr;
+       u64 cqx_aer;
+/* 0x20  */
+       u64 cqx_ptp;
+       u64 cqx_tp;
+       u64 cqx_fec;
+       u64 cqx_feca;
+/* 0x40  */
+       u64 cqx_ep;
+       u64 cqx_eq;
+/* 0x50  */
+       u64 reserved1;
+       u64 cqx_n0;
+/* 0x60  */
+       u64 cqx_n1;
+       u64 reserved2[(0x1000 - 0x60) / 8];
+/* 0x1000 */
+};
+
+#define CQX_FEC_CQE_CNT           EHCA_BMASK_IBM(32,63)
+#define CQX_FECADDER              EHCA_BMASK_IBM(32,63)
+#define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0,0)
+#define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0,0)
+
+#define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm,x)
+
+/* EQ Table Entry Memory Map */
+struct hipz_eqtemm {
+       u64 eqx_hcr;
+       u64 eqx_c;
+
+       u64 eqx_herr;
+       u64 eqx_aer;
+/* 0x20 */
+       u64 eqx_ptp;
+       u64 eqx_tp;
+       u64 eqx_ssba;
+       u64 eqx_psba;
+
+/* 0x40 */
+       u64 eqx_cec;
+       u64 eqx_meql;
+       u64 eqx_xisbi;
+       u64 eqx_xisc;
+/* 0x60 */
+       u64 eqx_it;
+};
+
+#define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm,x)
+
+/* access control defines for MR/MW */
+#define HIPZ_ACCESSCTRL_L_WRITE  0x00800000
+#define HIPZ_ACCESSCTRL_R_WRITE  0x00400000
+#define HIPZ_ACCESSCTRL_R_READ   0x00200000
+#define HIPZ_ACCESSCTRL_R_ATOMIC 0x00100000
+#define HIPZ_ACCESSCTRL_MW_BIND  0x00080000
+
+/* query hca response block */
+struct hipz_query_hca {
+       u32 cur_reliable_dg;
+       u32 cur_qp;
+       u32 cur_cq;
+       u32 cur_eq;
+       u32 cur_mr;
+       u32 cur_mw;
+       u32 cur_ee_context;
+       u32 cur_mcast_grp;
+       u32 cur_qp_attached_mcast_grp;
+       u32 reserved1;
+       u32 cur_ipv6_qp;
+       u32 cur_eth_qp;
+       u32 cur_hp_mr;
+       u32 reserved2[3];
+       u32 max_rd_domain;
+       u32 max_qp;
+       u32 max_cq;
+       u32 max_eq;
+       u32 max_mr;
+       u32 max_hp_mr;
+       u32 max_mw;
+       u32 max_mrwpte;
+       u32 max_special_mrwpte;
+       u32 max_rd_ee_context;
+       u32 max_mcast_grp;
+       u32 max_total_mcast_qp_attach;
+       u32 max_mcast_qp_attach;
+       u32 max_raw_ipv6_qp;
+       u32 max_raw_ethy_qp;
+       u32 internal_clock_frequency;
+       u32 max_pd;
+       u32 max_ah;
+       u32 max_cqe;
+       u32 max_wqes_wq;
+       u32 max_partitions;
+       u32 max_rr_ee_context;
+       u32 max_rr_qp;
+       u32 max_rr_hca;
+       u32 max_act_wqs_ee_context;
+       u32 max_act_wqs_qp;
+       u32 max_sge;
+       u32 max_sge_rd;
+       u32 memory_page_size_supported;
+       u64 max_mr_size;
+       u32 local_ca_ack_delay;
+       u32 num_ports;
+       u32 vendor_id;
+       u32 vendor_part_id;
+       u32 hw_ver;
+       u64 node_guid;
+       u64 hca_cap_indicators;
+       u32 data_counter_register_size;
+       u32 max_shared_rq;
+       u32 max_isns_eq;
+       u32 max_neq;
+} __attribute__ ((packed));
+
+/* query port response block */
+struct hipz_query_port {
+       u32 state;
+       u32 bad_pkey_cntr;
+       u32 lmc;
+       u32 lid;
+       u32 subnet_timeout;
+       u32 qkey_viol_cntr;
+       u32 sm_sl;
+       u32 sm_lid;
+       u32 capability_mask;
+       u32 init_type_reply;
+       u32 pkey_tbl_len;
+       u32 gid_tbl_len;
+       u64 gid_prefix;
+       u32 port_nr;
+       u16 pkey_entries[16];
+       u8  reserved1[32];
+       u32 trent_size;
+       u32 trbuf_size;
+       u64 max_msg_sz;
+       u32 max_mtu;
+       u32 vl_cap;
+       u8  reserved2[1900];
+       u64 guid_entries[255];
+} __attribute__ ((packed));
+
+#endif
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
new file mode 100644 (file)
index 0000000..e028ff1
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+ *  IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ *  internal queue handling
+ *
+ *  Authors: Waleri Fomin <fomin@de.ibm.com>
+ *           Reinhard Ernst <rernst@de.ibm.com>
+ *           Christoph Raisch <raisch@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ehca_tools.h"
+#include "ipz_pt_fn.h"
+
+void *ipz_qpageit_get_inc(struct ipz_queue *queue)
+{
+       void *ret = ipz_qeit_get(queue);
+       queue->current_q_offset += queue->pagesize;
+       if (queue->current_q_offset > queue->queue_length) {
+               queue->current_q_offset -= queue->pagesize;
+               ret = NULL;
+       }
+       if (((u64)ret) % EHCA_PAGESIZE) {
+               ehca_gen_err("ERROR!! not at PAGE-Boundary");
+               return NULL;
+       }
+       return ret;
+}
+
+void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
+{
+       void *ret = ipz_qeit_get(queue);
+       u64 last_entry_in_q = queue->queue_length - queue->qe_size;
+
+       queue->current_q_offset += queue->qe_size;
+       if (queue->current_q_offset > last_entry_in_q) {
+               queue->current_q_offset = 0;
+               queue->toggle_state = (~queue->toggle_state) & 1;
+       }
+
+       return ret;
+}
+
+int ipz_queue_ctor(struct ipz_queue *queue,
+                  const u32 nr_of_pages,
+                  const u32 pagesize, const u32 qe_size, const u32 nr_of_sg)
+{
+       int pages_per_kpage = PAGE_SIZE >> EHCA_PAGESHIFT;
+       int f;
+
+       if (pagesize > PAGE_SIZE) {
+               ehca_gen_err("FATAL ERROR: pagesize=%x is greater "
+                            "than kernel page size", pagesize);
+               return 0;
+       }
+       if (!pages_per_kpage) {
+               ehca_gen_err("FATAL ERROR: invalid kernel page size. "
+                            "pages_per_kpage=%x", pages_per_kpage);
+               return 0;
+       }
+       queue->queue_length = nr_of_pages * pagesize;
+       queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
+       if (!queue->queue_pages) {
+               ehca_gen_err("ERROR!! didn't get the memory");
+               return 0;
+       }
+       memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
+       /*
+        * allocate pages for queue:
+        * outer loop allocates whole kernel pages (page aligned) and
+        * inner loop divides a kernel page into smaller hca queue pages
+        */
+       f = 0;
+       while (f < nr_of_pages) {
+               u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
+               int k;
+               if (!kpage)
+                       goto ipz_queue_ctor_exit0; /*NOMEM*/
+               for (k = 0; k < pages_per_kpage && f < nr_of_pages; k++) {
+                       (queue->queue_pages)[f] = (struct ipz_page *)kpage;
+                       kpage += EHCA_PAGESIZE;
+                       f++;
+               }
+       }
+
+       queue->current_q_offset = 0;
+       queue->qe_size = qe_size;
+       queue->act_nr_of_sg = nr_of_sg;
+       queue->pagesize = pagesize;
+       queue->toggle_state = 1;
+       return 1;
+
+ ipz_queue_ctor_exit0:
+       ehca_gen_err("Couldn't get alloc pages queue=%p f=%x nr_of_pages=%x",
+                    queue, f, nr_of_pages);
+       for (f = 0; f < nr_of_pages; f += pages_per_kpage) {
+               if (!(queue->queue_pages)[f])
+                       break;
+               free_page((unsigned long)(queue->queue_pages)[f]);
+       }
+       return 0;
+}
+
+int ipz_queue_dtor(struct ipz_queue *queue)
+{
+       int pages_per_kpage = PAGE_SIZE >> EHCA_PAGESHIFT;
+       int g;
+       int nr_pages;
+
+       if (!queue || !queue->queue_pages) {
+               ehca_gen_dbg("queue or queue_pages is NULL");
+               return 0;
+       }
+       nr_pages = queue->queue_length / queue->pagesize;
+       for (g = 0; g < nr_pages; g += pages_per_kpage)
+               free_page((unsigned long)(queue->queue_pages)[g]);
+       vfree(queue->queue_pages);
+
+       return 1;
+}
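+
+/*
+ * Editor's note, not part of the original patch: ipz_queue_ctor() returns
+ * 1 on success and 0 on failure, so a caller checks it like this (sketch;
+ * nr_pages and the CQE element size are illustrative):
+ *
+ *     struct ipz_queue queue;
+ *
+ *     if (!ipz_queue_ctor(&queue, nr_pages, EHCA_PAGESIZE,
+ *                         sizeof(struct ehca_cqe), 0))
+ *             return -ENOMEM;
+ *     ...
+ *     ipz_queue_dtor(&queue);
+ */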
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
new file mode 100644 (file)
index 0000000..2f13509
--- /dev/null
@@ -0,0 +1,247 @@
+/*
+ *  IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ *  internal queue handling
+ *
+ *  Authors: Waleri Fomin <fomin@de.ibm.com>
+ *           Reinhard Ernst <rernst@de.ibm.com>
+ *           Christoph Raisch <raisch@de.ibm.com>
+ *
+ *  Copyright (c) 2005 IBM Corporation
+ *
+ *  All rights reserved.
+ *
+ *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ *  BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __IPZ_PT_FN_H__
+#define __IPZ_PT_FN_H__
+
+#define EHCA_PAGESHIFT   12
+#define EHCA_PAGESIZE   4096UL
+#define EHCA_PAGEMASK   (~(EHCA_PAGESIZE-1))
+#define EHCA_PT_ENTRIES 512UL
+
+#include "ehca_tools.h"
+#include "ehca_qes.h"
+
+/* struct generic ehca page */
+struct ipz_page {
+       u8 entries[EHCA_PAGESIZE];
+};
+
+/* struct generic queue in linux kernel virtual memory (kv) */
+struct ipz_queue {
+       u64 current_q_offset;   /* current queue entry */
+
+       struct ipz_page **queue_pages;  /* array of pages belonging to queue */
+       u32 qe_size;            /* queue entry size */
+       u32 act_nr_of_sg;
+       u32 queue_length;       /* queue length allocated in bytes */
+       u32 pagesize;
+       u32 toggle_state;       /* toggle flag - per page */
+       u32 dummy3;             /* 64 bit alignment */
+};
+
+/*
+ * return current Queue Entry for a certain q_offset
+ * returns address (kv) of Queue Entry
+ */
+static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset)
+{
+       struct ipz_page *current_page;
+       if (q_offset >= queue->queue_length)
+               return NULL;
+       current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT];
+       return  &current_page->entries[q_offset & (EHCA_PAGESIZE - 1)];
+}
+
+/*
+ * return current Queue Entry
+ * returns address (kv) of Queue Entry
+ */
+static inline void *ipz_qeit_get(struct ipz_queue *queue)
+{
+       return ipz_qeit_calc(queue, queue->current_q_offset);
+}
+
+/*
+ * return current Queue Page, increment Queue Page iterator from
+ * page to page in struct ipz_queue; the last increment returns NULL
+ * and does NOT wrap
+ * returns address (kv) of Queue Page
+ * warning don't use in parallel with ipz_qeit_get_inc()
+ */
+void *ipz_qpageit_get_inc(struct ipz_queue *queue);
+
+/*
+ * return current Queue Entry, increment Queue Entry iterator by one
+ * step in struct ipz_queue, will wrap in ringbuffer
+ * returns address (kv) of Queue Entry BEFORE increment
+ * warning don't use in parallel with ipz_qpageit_get_inc()
+ * warning unpredictable results may occur if steps>act_nr_of_queue_entries
+ */
+static inline void *ipz_qeit_get_inc(struct ipz_queue *queue)
+{
+       void *ret = ipz_qeit_get(queue);
+       queue->current_q_offset += queue->qe_size;
+       if (queue->current_q_offset >= queue->queue_length) {
+               queue->current_q_offset = 0;
+               /* toggle the valid flag */
+               queue->toggle_state = (~queue->toggle_state) & 1;
+       }
+
+       return ret;
+}
+
+/*
+ * return current Queue Entry, increment Queue Entry iterator by one
+ * step in struct ipz_queue, will wrap in ringbuffer
+ * returns address (kv) of Queue Entry BEFORE increment
+ * returns NULL and does not increment if the valid state is wrong
+ * warning don't use in parallel with ipz_qpageit_get_inc()
+ * warning unpredictable results may occur if steps>act_nr_of_queue_entries
+ */
+static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
+{
+       struct ehca_cqe *cqe = ipz_qeit_get(queue);
+       u32 cqe_flags = cqe->cqe_flags;
+
+       if ((cqe_flags >> 7) != (queue->toggle_state & 1))
+               return NULL;
+
+       ipz_qeit_get_inc(queue);
+       return cqe;
+}
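+
+/*
+ * Editor's note, not part of the original patch: the high bit of
+ * cqe_flags acts as a "valid" toggle.  Entries written in the current
+ * pass over the ring carry the queue's toggle_state; the state flips
+ * each time the ring wraps (see ipz_qeit_get_inc() above), so the
+ * consumer can spot fresh entries without a shared producer index.
+ */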
+
+/*
+ * resets the Queue Entry iterator
+ * returns address (kv) of the first Queue Entry
+ */
+static inline void *ipz_qeit_reset(struct ipz_queue *queue)
+{
+       queue->current_q_offset = 0;
+       return ipz_qeit_get(queue);
+}
+
+/* struct generic page table */
+struct ipz_pt {
+       u64 entries[EHCA_PT_ENTRIES];
+};
+
+/* struct page table for a queue, only to be used in pf */
+struct ipz_qpt {
+       /* queue page tables (kv), use u64 because we know the element length */
+       u64 *qpts;
+       u32 n_qpts;
+       u32 n_ptes;       /*  number of page table entries */
+       u64 *current_pte_addr;
+};
+
+/*
+ * constructor for an ipz_queue; placement new for struct ipz_queue,
+ * new for all dependent data structures
+ * all QP Tables are the same
+ * flow:
+ *    allocate+pin queue
+ * see ipz_qpt_ctor()
+ * returns true if ok, false if out of memory
+ */
+int ipz_queue_ctor(struct ipz_queue *queue, const u32 nr_of_pages,
+                  const u32 pagesize, const u32 qe_size,
+                  const u32 nr_of_sg);
+
+/*
+ * destructor for an ipz_queue
+ *  -# free queue
+ *  see ipz_queue_ctor()
+ *  returns true if ok, false if queue was a NULL pointer or free failed
+ */
+int ipz_queue_dtor(struct ipz_queue *queue);
+
+/*
+ * constructor for an ipz_qpt,
+ * placement new for struct ipz_queue, new for all dependent data structures
+ * all QP Tables are the same,
+ * flow:
+ * -# allocate+pin queue
+ * -# initialise ptcb
+ * -# allocate+pin PTs
+ * -# link PTs to a ring, according to HCA Arch, set bit62 if needed
+ * -# the ring must have room for exactly nr_of_PTEs
+ * see ipz_qpt_ctor()
+ */
+void ipz_qpt_ctor(struct ipz_qpt *qpt,
+                 const u32 nr_of_qes,
+                 const u32 pagesize,
+                 const u32 qe_size,
+                 const u8 lowbyte, const u8 toggle,
+                 u32 *act_nr_of_QEs, u32 *act_nr_of_pages);
+
+/*
+ * return current Queue Entry, increment Queue Entry iterator by one
+ * step in struct ipz_queue, will wrap in ringbuffer
+ * returns address (kv) of Queue Entry BEFORE increment
+ * warning don't use in parallel with ipz_qpageit_get_inc()
+ * warning unpredictable results may occur if steps>act_nr_of_queue_entries
+ * (EQ variant of ipz_qeit_get_inc() that works around EQ page problems)
+ */
+void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);
+
+/*
+ * return current Event Queue Entry, increment Queue Entry iterator
+ * by one step in struct ipz_queue if valid, will wrap in ringbuffer
+ * returns address (kv) of Queue Entry BEFORE increment
+ * returns 0 and does not increment, if wrong valid state
+ * warning don't use in parallel with ipz_qpageit_get_inc()
+ * warning unpredictable results may occur if steps>act_nr_of_queue_entries
+ */
+static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
+{
+       void *ret = ipz_qeit_get(queue);
+       u32 qe = *(u8 *)ret;
+       if ((qe >> 7) != (queue->toggle_state & 1))
+               return NULL;
+       ipz_qeit_eq_get_inc(queue); /* valid entry, advance the iterator */
+       return ret;
+}
+
+/* returns address (GX) of first queue entry */
+static inline u64 ipz_qpt_get_firstpage(struct ipz_qpt *qpt)
+{
+       return be64_to_cpu(qpt->qpts[0]);
+}
+
+/* returns address (kv) of first page of queue page table */
+static inline void *ipz_qpt_get_qpt(struct ipz_qpt *qpt)
+{
+       return qpt->qpts;
+}
+
+#endif                         /* __IPZ_PT_FN_H__ */